Example #1
class RedisManager(NoSqlManager):
    def __init__(self,
                 namespace,
                 url=None,
                 data_dir=None,
                 lock_dir=None,
                 **params):
        self.db = params.pop('db', None)
        self.dbpass = params.pop('password', None)
        self.connection_pool = params.get('redis_connection_pool', None)
        self.expires = params.get('expires', params.get('expiretime', None))
        NoSqlManager.__init__(self,
                              namespace,
                              url=url,
                              data_dir=data_dir,
                              lock_dir=lock_dir,
                              **params)

    def open_connection(self, host, port, **params):
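        # Reuse an injected connection pool ('redis_connection_pool' param) if
        # one was provided; otherwise lazily build one from the given host and
        # port plus the db/password params captured in __init__.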
        if not self.connection_pool:
            self.connection_pool = ConnectionPool(host=host, port=port, db=self.db,
                    password=self.dbpass)
        self.db_conn = StrictRedis(connection_pool=self.connection_pool, **params)
    
    def __contains__(self, key):
        return self.db_conn.exists(self._format_key(key))

    def set_value(self, key, value, expiretime=None):
        key = self._format_key(key)
        # beaker.container.Value.set_value calls NamespaceManager.set_value,
        # however it (until version 1.6.4) never sets the expiretime param.
        #
        # Checking "type(value) is tuple" is a compromise, because the Manager
        # class can be instantiated outside container.py (see: session.py).
        if (expiretime is None) and (type(value) is tuple):
            expiretime = value[1]
        # If the machinery above fails, then pickup the expires time from the
        # init params.
        if not expiretime and self.expires is not None:
            expiretime = self.expires
        # Set or setex, according to whether we got an expires time or not.
        if expiretime:
            self.db_conn.setex(key, expiretime, pickle.dumps(value, 2))
        else:
            self.db_conn.set(key, pickle.dumps(value, 2))

    def __delitem__(self, key):
        self.db_conn.delete(self._format_key(key))

    def _format_key(self, key):
        return 'beaker:%s:%s' % (self.namespace, key.replace(' ', '\302\267'))

    def _format_pool_key(self, host, port, db):
        return '{0}:{1}:{2}'.format(host, port, self.db)

    def do_remove(self):
        # StrictRedis has no flush(); flushdb() clears the current database
        self.db_conn.flushdb()

    def keys(self):
        return self.db_conn.keys('beaker:%s:*' % self.namespace)
Example #2
def load(obj, namespace=None, silent=True, key=None):
    """
    Reads and loads into "settings" a single key or all keys from redis
    :param obj: the settings instance
    :param namespace: settings namespace default='DYNACONF'
    :param silent: if errors should raise
    :param key: if defined load a single key, else load all in namespace
    :return: None
    """
    redis = StrictRedis(**obj.REDIS_FOR_DYNACONF)
    namespace = namespace or obj.DYNACONF_NAMESPACE
    holder = "DYNACONF_%s" % namespace
    try:
        if key:
            value = parse_conf_data(redis.hget(holder.upper(), key))
            if value:
                obj.set(key, value)
        else:
            data = {
                key: parse_conf_data(value)
                for key, value in redis.hgetall(holder.upper()).items()
            }
            if data:
                obj.update(data, loader_identifier=IDENTIFIER)
    except Exception as e:
        e.message = 'Unable to load config from redis (%s)' % e.message
        if silent:
            obj.logger.error(e.message)
            return False
        raise
Example #3
def feed_db(container_id, stats):
    """ Store data to Redis.
        args:
         - container_id : (str) the first 12 characters of the container's hash
         - stats : a dictionary of stats
    """
    if DEBUG:
        print('feed db with container {} stats'.format(container_id))

    # trim the fractional seconds to microseconds and drop the colon from the
    # timezone offset so strptime can parse the 'read' timestamp, then
    # re-format it to the desired REDIS_KEY_TIMESTAMP format
    instant_str = stats['read'][:-9]+stats['read'][-6:].replace(':', '')
    instant = datetime.strptime(instant_str, '%Y-%m-%dT%H:%M:%S.%f%z')
    timestamp = instant.strftime(REDIS_KEY_TIMESTAMP)

    r = StrictRedis(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB)
    for resource, value in stats.items():
        if resource != 'read':
            key = REDIS_KEY.format(timestamp=timestamp,
                                   container_id=container_id,
                                   resource=resource)

            r.set(key, dumps(value))
            r.expire(key, REDIS_EXPIRE_TIME)

            if DEBUG:
                print("Stored {} => {}".format(key, value))
Example #4
def fetch_questions_ajax(request):
    current_user = request.user
    '''
    Query the database to find out whether a question paper has already been
    made for this user. If it has, return the questions with shuffled choices
    along with the user's answers; if not, return the same structure but with
    empty or missing answers.
    '''
    questions = QuestionPaper.objects.all().filter(user=current_user)
    question_list = []
    if not questions:
        # make new questions here
        print "making new questions for the user:"******"now setting the key "+key+", which will expire in "+str(seconds)+" seconds.";
        rd = StrictRedis()
        rd.setex(key, seconds, "1")
    else:
        question_list = map(get_dictionary_from_question_paper, list(questions))

    data = {"data":question_list}
    return JsonResponse(data)
Example #5
File: redismq.py Project: ezbake/redisMQ
class RedisProducer(object):
    def __init__(self, hostname = 'localhost', port = 6379):
        log.debug("Initializing RedisProducer with hostname of %s and port %s" % (hostname, port))
        self.r = StrictRedis(host = hostname, port = port)

    def send(self, message):
        tries = 0
        next_index_key = get_next_index_for_topic_key(message.topic)
        next_index = 1
        result = None
        log.debug("Sending message on topic %s" % message.topic)

        while result is None and tries < TRIES_LIMIT:
            tries += 1
            if self.r.exists(next_index_key):
                next_index = long(self.r.get(next_index_key)) + 1

            message_key = get_message_key(message.topic, next_index)

            try:
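                # Optimistic concurrency: WATCH both keys, queue INCR + SET in
                # a MULTI block, and retry if another producer modified them
                # first (redis-py raises WatchError at EXEC time in that case).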
                pl = self.r.pipeline()
                pl.watch(next_index_key, message_key)
                pl.multi()
                pl.incr(next_index_key).set(message_key, message.payload)
                result = pl.execute()
            except WatchError:
                # Should probably log something here, but all it means is we're
                # retrying
                pass

        if result is None:
            log.error("Could not send message, retry amount exceeded")
            raise RuntimeError("Attempted to send message %s times and failed" % TRIES_LIMIT)
Example #6
File: srv.py Project: lwzm/bb
def load_data(ids, host, port):
    """from redis
    uniq in index
    """
    from redis import StrictRedis
    from json import loads

    db = StrictRedis(host, port, decode_responses=True)
    logging.debug("all: %d", len(ids))

    if 0 in ids:
        raise KeyError(0)

    from collections import Counter
    checker = Counter(ids)
    if len(checker) != len(ids):  # not unique
        raise ValueError(checker.most_common(3))

    pipe = db.pipeline()
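    # prepend the special id 0 and queue one HGETALL per id so everything is
    # fetched from redis in a single round trip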
    ids.insert(0, 0)
    for i in ids:
        pipe.hgetall(i)
    properties = pipe.execute(True)  # raise_on_error=True: do not swallow redis errors

    raw = {}
    for i, p in zip(ids, properties):
        raw[i] = {k: loads(v) for k, v in p.items()}
    return raw
Example #7
 def test_get_text(self):
     r = StrictRedis()
     r.set('test_get_2', "Open Mining")
     DW = DataWarehouse()
     self.assertEquals(
         DW.get("test_get_2", content_type='application/text'),
         "Open Mining")
Example #8
 def sync_get(self, identity, *args, **kwargs):
     """
     For getting data from cache
     :param identity: Unique Integer for the data
     :param args: Args for the sync function. (Default: None)
     """
     redis = StrictRedis(connection_pool=self.redis_pool)
     key = key_generator(self.key, identity)
     try:
         if redis.exists(key):
             data = self.get_func(redis.get(key))
         else:
             data = self.sync_func(identity, *args, **kwargs)
             if self.expire:
                 self._setex(redis, key, self.set_func(data))
             else:
                 redis.set(key, self.set_func(data))
         if data is not None and data != "":
             return data
         return None
     except RedisError as re:
         self.log.error("[REDIS] %s", str(re))
         data = self.sync_func(identity, *args, **kwargs)
         return data
     finally:
         del redis
Example #9
    def __admin_handler(self, endpoint: bytes):
        """
        Handle Admin Request

        :param bytes endpoint: Endpoint (in bytes!)
        :return: jsonified answer data
        """
        json_data = request.get_json()
        if json_data is None:
            raise BadRequest()
        if 'admin_token' not in json_data:
            raise BadRequest()
        admin_token = json_data['admin_token']
        if not isinstance(admin_token, str):
            raise BadRequest()
        if not RULE_TOKEN.match(admin_token):
            raise BadRequest()
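        # the admin token is single-use: its value in redis must name the
        # target endpoint, and it is deleted as soon as it has been checked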
        redis = StrictRedis(connection_pool=self.api_pool)
        ep_key = 'ADMIN_TOKEN:{:s}'.format(admin_token)
        should_endpoint = redis.get(ep_key)
        if should_endpoint is None:
            raise BadRequest()
        redis.delete(ep_key)
        if should_endpoint != endpoint:
            raise BadRequest()
        if 'data' not in json_data:
            raise BadRequest()
        data = json_data['data']
        if not isinstance(data, dict):
            raise BadRequest()
        return jsonify(self.queue_dispatcher({
            '_': 'admin:{:s}'.format(endpoint.decode('utf-8')),
            'data': data,
        }))
Example #10
class SutroTestCase(TestCase):
  def setUp(self):
    redis_connection_pool = ConnectionPool(**settings.WS4REDIS_CONNECTION)
    self.redis = StrictRedis(connection_pool=redis_connection_pool)
    self.client = Client()

  def tearDown(self):
    self.redis.flushdb()

  def random_string(self, length=None, str_type=None):
    DEFAULT_LENGTH = 10
    length = length if length else DEFAULT_LENGTH
    if str_type == 'number':
      string_type = string.digits
    else:
      string_type = string.lowercase
    return ''.join(random.choice(string_type) for x in range(length))

  def create_a_user(self, display_name, user_id=None, icon_url=None, user_url=None, rdio_key=None):
    user = User()
    user.display_name = display_name
    user.id = user_id if user_id else uuid.uuid4().hex
    user.icon_url = icon_url if icon_url else 'http://' + self.random_string() + '.jpg'
    user.user_url = user_url if user_url else 'http://' + self.random_string() + '.com/' + self.random_string()
    user.rdio_key = rdio_key if rdio_key else 's' + self.random_string(length=5, str_type='number')
    user.save(self.redis)
    return user

  def create_a_party(self, party_id, name):
    party = Party()
    party.id = party_id
    party.name = name
    party.save(self.redis)
    return party
Example #11
def render_task(dburl, docpath, slug):
    """Render a document."""
    oldcwd = os.getcwd()
    try:
        os.chdir(os.path.join(docpath, slug))
    except:
        db = StrictRedis.from_url(dburl)
        job = get_current_job(db)
        job.meta.update({'out': 'Document not found.', 'return': 127, 'status': False})
        return 127

    db = StrictRedis.from_url(dburl)
    job = get_current_job(db)
    job.meta.update({'out': '', 'milestone': 0, 'total': 1, 'return': None,
                     'status': None})
    job.save()

    p = subprocess.Popen(('lualatex', '--halt-on-error', slug + '.tex'),
                         stdout=subprocess.PIPE)

    out = []

    while p.poll() is None:
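        # stream each line of lualatex output into the RQ job's meta so
        # callers watching the job can see live progress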
        nl = p.stdout.readline()
        out.append(nl)
        job.meta.update({'out': ''.join(out), 'return': None,
                         'status': None})
        job.save()

    out = ''.join(out)
    job.meta.update({'out': out, 'return': p.returncode, 'status':
                     p.returncode == 0})
    job.save()
    os.chdir(oldcwd)
    return p.returncode
Example #12
 def redis(self):
     redis_config = self.config.get('redis')
     redis = StrictRedis(
         redis_config.get('host'), redis_config.get('port'), redis_config.get('db')
     )
     redis.ping()
     return redis
Example #13
def get_recent(**redis_kwargs):
    """ Retrieve recent messages from Redis, in reverse chronological order.
        
        Two lists are returned: one a single most-recent status message from
        each process, the other a list of numerous messages from each process.
        
        Each message is a tuple with floating point seconds elapsed, integer
        process ID that created it, and an associated text message such as
        "Got cache lock in 0.001 seconds" or "Started /osm/12/656/1582.png".
    
        Keyword args are passed directly to redis.StrictRedis().
    """
    pid = getpid()
    red = StrictRedis(**redis_kwargs)
    
    processes = []
    messages = []

    for key in red.keys('pid-*-statuses'):
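        # each status entry is stored as "<timestamp> <text>"; convert the
        # absolute timestamp into seconds elapsed relative to now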
        try:
            now = time()
            pid = int(key.split('-')[1])
            msgs = [msg.split(' ', 1) for msg in red.lrange(key, 0, _keep)]
            msgs = [(now - float(t), pid, msg) for (t, msg) in msgs]
        except:
            continue
        else:
            messages += msgs
            processes += msgs[:1]
    
    messages.sort() # youngest-first
    processes.sort() # youngest-first

    return processes, messages
Example #14
class RedisDataStore(DataStore):
    """Redis-backed datastore object."""

    def __init__(self, number=0):
        redis_host = os.environ.get('REDIS_PORT_6379_TCP_ADDR')
        redis_port = os.environ.get('REDIS_PORT_6379_TCP_PORT')
        self.redis_conn = StrictRedis(host=redis_host, port=redis_port,
                                      db=number)

    def __setitem__(self, k, v):
        self.redis_conn.set(k, v)

    def __getitem__(self, k):
        return self.redis_conn.get(k)

    def __delitem__(self, k):
        self.redis_conn.delete(k)

    def get(self, k):
        return self.redis_conn.get(k)

    def __contains__(self, k):
        return self.redis_conn.exists(k)

    def todict(self):
        #TODO(tvoran): use paginate
        #TODO(tvoran): do something besides multiple gets
        data = {}
        for key in self.redis_conn.keys():
            data[key] = self.get(key)
        return data

    def clear_all(self):
        self.redis_conn.flushdb()
Example #15
def get_mail_from_redis(num=10):
    r = StrictRedis('127.0.0.1', 6379)
    key = "QQMAIL"
    mail_list = r.lrange(key, 0, num)
    for i in range(num):
        r.lpop(key)
    return set(mail_list)
Example #16
def add_tweets_to_redis(tweet_file):
    """
    'DVD: FBI WARNING Me: oh boy here we go DVD: The board advises you
    to have lots of fun watching this Hollywood movie Me: Ah.. It's a
    nice one'
    """
    redis_client = StrictRedis(host='localhost', port=6379, db=0)
    with open(tweet_file, 'r') as tweets:
        for line in tweets:
            # again, dealing with weird error here
            try:
                tweet = line.strip().split('|', 2)[2]
                # need to investigate whether one-by-one inserting
                # or building a list of tweets and doing a single insert
                # would be more efficient
                if not redis_client.sismember('tweets', tweet):
                    result = redis_client.sadd('tweets', tweet)
                    if not result:
                        print('error occurred adding tweet: "{}" to redis'
                              .format(tweet))
                else:
                    # found all new tweets
                    break
            except IndexError:
                continue
    redis_client.save()
Example #17
    def __init__(self, old_redis_url, new_redis_url, dry_run=True,
                 per_recording_list=False, s3_import=False, s3_root=None):
        self.old_redis = StrictRedis.from_url(old_redis_url, decode_responses=True)
        self.dry_run = dry_run
        self.per_recording_list = per_recording_list
        self.s3_import = s3_import

        if s3_import:
            assert(s3_root)
            import boto3
            self.s3_root = s3_root
            self.s3 = boto3.client('s3')
        else:
            self.s3_root = None
            self.s3 = None

        if self.dry_run:
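            # monkey-patch the redis module so any downstream code that creates
            # its own StrictRedis during a dry run also ends up on fakeredis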
            import redis
            redis.StrictRedis = fakeredis.FakeStrictRedis
            self.redis = FakeStrictRedis.from_url(new_redis_url, decode_responses=True)
        else:
            self.redis = StrictRedis.from_url(new_redis_url, decode_responses=True)

        print('Redis Inited')

        self.cli = CLIUserManager(new_redis_url)
Example #18
File: helpers.py Project: CIRCL/potiron
def check_running(name: str):
    socket_path = get_socket_path(name)
    try:
        r = StrictRedis(unix_socket_path=socket_path)
        return r.ping()
    except ConnectionError:
        return False
Example #19
 def test_list_preparation_with_invalid_type(self) -> None:
     """
     When a list key is already there with a wrong type
     """
     redis = StrictRedis(connection_pool=self.__pool)
     redis.set(self.__config['queue'], 'test')
     self.assertRaises(ValueError, RedisQueueAccess, self.__config)
Example #20
File: tests.py Project: reedboat/dcron
class JobStatsTest(TestCase):
    def setUp(self):
        self.redis = StrictRedis(**settings.REDISES['default'])
        self.stats = JobStats(self.redis)

    def tearDown(self):
        self.redis.delete('job_stats.1')

    def testIncr(self):
        job_id = 1
        field = 'success'
        count = self.stats.get(job_id, field)
        self.assertEqual(0, count)

        self.stats.incr(job_id, field, 1)
        count = self.stats.get(job_id, field)
        self.assertEqual(1, count)

        self.stats.incr(job_id, field, 2)
        count = self.stats.get(job_id, field)
        self.assertEqual(3, count)

        self.stats.incr(job_id, 'failed')
        count = self.stats.get(job_id, field)
        self.assertEqual(3, count)

    def testGet(self):
        job_id = 1
        self.stats.incr(job_id, 'failed', 1)
        self.stats.incr(job_id, 'success', 2)
        counts = self.stats.get(job_id)
        self.assertEqual(1, counts['failed'])
        self.assertEqual(2, counts['success'])
Example #21
class RedisManager(NoSqlManager):
    def __init__(self, namespace, url=None, data_dir=None, lock_dir=None, **params):
        self.connection_pool = params.pop('connection_pool', None)
        NoSqlManager.__init__(self, namespace, url=url, data_dir=data_dir, lock_dir=lock_dir, **params)

    def open_connection(self, host, port, **params):
        self.db_conn = StrictRedis(host=host, port=int(port), connection_pool=self.connection_pool, **params)

    def __contains__(self, key):
        log.debug('%s contained in redis cache (as %s) : %s'%(key, self._format_key(key), self.db_conn.exists(self._format_key(key))))
        return self.db_conn.exists(self._format_key(key))

    def set_value(self, key, value, expiretime=None):
        key = self._format_key(key)

        #XXX: beaker.container.Value.set_value calls NamespaceManager.set_value,
        # however it (until version 1.6.3) never sets the expiretime param. Why?

        if expiretime:
            self.db_conn.setex(key, expiretime, pickle.dumps(value))
        else:
            self.db_conn.set(key, pickle.dumps(value))

    def __delitem__(self, key):
        self.db_conn.delete(self._format_key(key))

    def _format_key(self, key):
        return 'beaker:%s:%s' % (self.namespace, key.replace(' ', '\302\267'))

    def do_remove(self):
        # StrictRedis has no flush(); flushdb() clears the current database
        self.db_conn.flushdb()

    def keys(self):
        return self.db_conn.keys('beaker:%s:*' % self.namespace)
Example #22
def test_token(redis_server):
    conn = StrictRedis(unix_socket_path=UDS_PATH)
    lock = Lock(conn, "foobar-tok")
    tok = lock.id
    assert conn.get(lock._name) is None
    lock.acquire(blocking=False)
    assert conn.get(lock._name) == tok
Example #23
def app(environ, start_response):
	ret = {}
	qs = dict(parse_qsl(environ["QUERY_STRING"]))
	red = StrictRedis(host="localhost", port=6379, db=0)
	root_key = red.get("root")
	if root_key is None: #Load trucks into redis if there is no tree already in it
		inp_file = "Mobile_Food_Facility_Permit.csv"
		load_trucks(inp_file, red)
		root_key = red.get("root")
	try:
		lat = float(qs["latitude"])
		lon = float(qs["longitude"])
		rad = float(qs["radius"])
	except KeyError: #Return error if required fields aren't present
		start_response("400 Bad Request", [("Content-type", "text/plain")])
		ret["error"] = "latitude, longitude, and radius query parameters are required"
		return [dumps(ret)]
	ret["latitude"] = lat
	ret["longitude"] = lon
	ret["radius"] = rad
	food = qs.get("food", "").upper()
	if food:
		ret["food"] = food
		ret["trucks"] = [str(t)
				for t in get_trucks(lat, lon, rad, red, root_key) if food in t.food]
	else:
		trucks = []
		foods = set()
		for t in get_trucks(lat, lon, rad, red, root_key):
			trucks.append(str(t))
			foods |= set(t.food)
		ret["trucks"] = trucks
		ret["foods"] = list(foods)
	start_response("200 OK", [("Content-type", "text/plain")])
	return [dumps(ret)]
Example #24
 def __init_redis(self):
     try:
         redis = StrictRedis()
         redis.ping()    # raises an exception if it fails
         self.redis = redis
     except:
         pass
Example #25
def test_lshash_redis():
    """
    Test external lshash module
    """
    config = {"redis": {"host": 'localhost', "port": 6379, "db": 15}}
    sr = StrictRedis(**config['redis'])
    sr.flushdb()

    lsh = LSHash(6, 8, 1, config)
    for i in xrange(num_elements):
        lsh.index(list(els[i]))
        lsh.index(list(els[i]))  # multiple insertions should be prevented by the library
    hasht = lsh.hash_tables[0]
    itms = [hasht.get_list(k) for k in hasht.keys()]
    for itm in itms:
        for el in itm:
            assert itms.count(itm) == 1  # have multiple insertions been prevented?
            assert el in els
    for el in els:
        res = lsh.query(list(el), num_results=1, distance_func='euclidean')[0]
        el_v, el_dist = res
        assert el_v in els
        assert el_dist == 0
    del lsh
    sr.flushdb()
Example #26
def test_lshash_redis_extra_val():
    """
    Test external lshash module
    """
    config = {"redis": {"host": 'localhost', "port": 6379, "db": 15}}
    sr = StrictRedis(**config['redis'])
    sr.flushdb()

    lsh = LSHash(6, 8, 1, config)
    for i in xrange(num_elements):
        lsh.index(list(els[i]), el_names[i])
        lsh.index(list(els[i]), el_names[i])  # multiple insertions
    hasht = lsh.hash_tables[0]
    itms = [hasht.get_list(k) for k in hasht.keys()]
    for itm in itms:
        assert itms.count(itm) == 1
        for el in itm:
            assert el[0] in els
            assert el[1] in el_names
    for el in els:
        res = lsh.query(list(el), num_results=1, distance_func='euclidean')[0]
        # vector and name are in the first element of the tuple res[0]
        el_v, el_name = res[0]
        # the distance is in the second element of the tuple
        el_dist = res[1]
        assert el_v in els
        assert el_name in el_names
        assert el_dist == 0
    del lsh
    sr.flushdb()
Example #27
def show_search_results(query):
    print("""
Search Result:  {keyword}
==================================================

""".format(keyword=query))

    p = Persistent("minamo")
    r = StrictRedis(decode_responses=True)


    resultset = None
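    # intersect the page-id sets stored for each bigram so only pages that
    # contain all of the query's bigrams remain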
    for _query in query.split(" "):
        qgram = ngram.ngram(_query, 2)
        for bi in list(qgram)[:-1]:
            if resultset is None:
                resultset = set(r.lrange("minamo:bi:{}".format(bi), 0, -1))
            else:
                resultset = resultset & set(r.lrange("minamo:bi:{}".format(bi), 0, -1))

    for page in (p.load(models.Page, x) for x in resultset):
        if page.title is None:
            continue

        print("*", page.title)
        print(" ", page.url)
Example #28
File: client.py Project: Locu/djredis
  def __init__(self, hosts, options):
    sentinel_kwargs = self._get_sentinel_kwargs(options)
    node_kwargs = self._get_node_kwargs(options)

    masters = None
    # Try to fetch a list of all masters from any sentinel.
    hosts = list(hosts)
    shuffle(hosts) # Randomly sort sentinels before trying to bootstrap.
    for host, port in hosts:
      client = StrictRedis(host=host, port=port, **sentinel_kwargs)
      try:
        masters = client.sentinel_masters().keys()
        break
      except RedisError:
        pass
    if masters is None:
      # No Sentinel responded successfully?
      raise errors.MastersListUnavailable
    if not len(masters):
      # The masters list was empty?
      raise errors.NoMastersConfigured

    sentinel_kwargs.update({
      # Sentinels connected to fewer sentinels than `MIN_SENTINELS` will
      # be ignored.
      'min_other_sentinels': options.get('MIN_SENTINELS',
                                         len(hosts) / 2),
      })
    self.sentinel = Sentinel(hosts, **sentinel_kwargs)
    masters = [self.sentinel.master_for(name, **node_kwargs)
               for name in masters]
    super(SentinelBackedRingClient, self).__init__(masters, options)
Example #29
File: views.py Project: am1ty9d9v/oposod
def send_friend_request(request, recipient_id):
    if request.is_ajax():
        recipient = get_object_or_404(User, id=recipient_id)
        redis_obj = StrictRedis(db=9)
        try:
            redis_obj.publish('notifications:%s' % recipient.username, 1)
        except Exception, err:
            print err

        fr_obj = None

        try:
            fr_obj = FriendRequest.objects.get(
                sender=request.user, recipient=recipient)
        except:
            fr_obj = FriendRequest.objects.create(
                sender=request.user, recipient=recipient)

        # Also create an entry in redis for this user.
        # PubSub only works if the user is online and subscribed to the live
        # stream
        try:
            # hmset() takes a key and a mapping, not alternating field/value
            # arguments
            redis_obj.hmset(
                'user:notify:%s' % request.user.id,
                {'obj_name': fr_obj._meta.object_name,
                 'obj_id': fr_obj.id,
                 'time_of': int(time()),
                 'was_viewed': 'false'})
        except:
            pass
Example #30
File: views.py Project: dreganism/lore
def get_redis_info():
    """Check Redis connection."""
    try:
        url = settings.BROKER_URL
        _, host, port, _, password, db, _ = parse_redis_url(url)
    except AttributeError:
        log.error("No valid Redis connection info found in settings.")
        return {"status": NO_CONFIG}

    start = datetime.now()
    try:
        rdb = StrictRedis(
            host=host, port=port, db=db,
            password=password, socket_timeout=TIMEOUT_SECONDS,
        )
        info = rdb.info()
    except (RedisConnectionError, TypeError) as ex:
        log.error("Error making Redis connection: %s", ex.args)
        return {"status": DOWN}
    except RedisResponseError as ex:
        log.error("Bad Redis response: %s", ex.args)
        return {"status": DOWN, "message": "auth error"}
    micro = (datetime.now() - start).microseconds
    del rdb  # the redis package does not support Redis's QUIT.
    ret = {
        "status": UP, "response_microseconds": micro,
    }
    fields = ("uptime_in_seconds", "used_memory", "used_memory_peak")
    ret.update({x: info[x] for x in fields})
    return ret
Example #31
from flask_session import Session
from redis import StrictRedis
from flask import Flask, session
# from flask_session import Session

app = Flask(__name__)

app.config['SESSION_TYPE'] = 'redis'  # storage backend for sessions
app.config['SESSION_REDIS'] = StrictRedis(host='192.168.59.128', port=6379)
app.config['SESSION_USE_SIGNER'] = True
app.config['SECRET_KEY'] = 'test'  # the application's secret key

# initialize the extension
Session(app)


@app.route('/index')
def home():
    session['userid'] = 11
    return 'index'


if __name__ == '__main__':
    app.run(debug=True)
Example #32
from telethon.sessions import StringSession

from .dB.core import *
from .dB.database import Var
from .misc import *
from .utils import *
from .version import __version__

LOGS = getLogger(__name__)

try:
    redis_info = Var.REDIS_URI.split(":")
    udB = StrictRedis(
        host=redis_info[0],
        port=redis_info[1],
        password=Var.REDIS_PASSWORD,
        charset="utf-8",
        decode_responses=True,
    )
except ConnectionError as ce:
    wr(f"ERROR - {ce}")
    exit(1)
except ResponseError as res:
    wr(f"ERROR - {res}")
    exit(1)

if not Var.API_ID or not Var.API_HASH:
    wr("No API_ID or API_HASH found.    Quiting...")
    exit(1)

BOT_MODE = Var.BOT_MODE or udB.get("BOT_MODE")
Example #33
# -*- coding: utf-8 -*-

import time
import random
from threading import Thread, active_count
from queue import Queue
import requests
from lxml import html
from debug.my_logging import create_logger
import csv
from redis import StrictRedis

my_logger = create_logger(__name__)
csv_writer = csv.writer(open("my_job.csv", "a", encoding="utf-8"))
redis = StrictRedis(host="127.0.0.1", port=63796)

# page fetched successfully
start_url = "https://search.51job.com/list/000000,000000,0000,00,9,99,python,2,1.html" \
            "?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99" \
            "&ord_field=0&dibiaoid=0&line=&welfare="
# queues of links waiting to be crawled
# queue of links to job-search result list pages
list_links_queue = Queue()
list_links_queue.put(start_url)
# queue of links to job detail pages
job_links_queue = Queue()

# already-crawled links: every crawled link is added here to prevent crawling it again
crawled_links = set()
redis.delete("crawled_links")
Example #34
    def get_json_data(self):
        q = {'event_type': self.event_type.value}
        try:
            # filter out old entries
            # offset by one because of timestamp rounding
            date_from = int(self.request.GET['from']) + 1
            q['date_created__gt'] = timezone.make_aware(
                datetime.datetime.fromtimestamp(date_from), timezone.utc)
        except (ValueError, KeyError):
            pass

        contestant = self.object
        try:
            latest_date = contestant.corrections.filter(
                event_type=self.event_type.value).latest().date_created_utc
        except contest.models.ContestantCorrection.DoesNotExist:
            latest_date = None
        # we order by date_created *asc* as it will be *prepended* on the frontend
        corrections_qs = contestant.corrections.select_related(
            'author').filter(**q).order_by('date_created').distinct()
        corr_tpl = get_template('correction/chunk-correction.html')
        corrections = [
            corr_tpl.render({'corr': corr}) for corr in corrections_qs
        ]
        changes = {}
        latest_correction = corrections_qs.last()
        if latest_correction and latest_correction.changes:
            changes = latest_correction.changes

        # presence, through redis
        online_users = [self.request.user]
        online_alive = False
        try:
            self_pk = self.request.user.pk
            timeout = settings.CORRECTION_LIVE_UPDATE_TIMEOUT
            now = timezone.make_naive(timezone.now(), timezone.utc).timestamp()
            moments_ago = now - timeout
            key = settings.CORRECTION_LIVE_UPDATE_REDIS_KEY.format(
                key=contestant.pk)
            client = StrictRedis(**settings.PROLOGIN_UTILITY_REDIS_STORE)
            client.expire(
                key, timeout * 2
            )  # garbage collect the whole set after a bit if no further updates
            client.zadd(key, {self_pk: now})  # add self to the set
            members = client.zrangebyscore(
                key, moments_ago, '+inf')  # list people that are recent enough
            members = set(int(pk)
                          for pk in members) - {self_pk}  # exclude self
            online_users.extend(
                User.objects.filter(pk__in=members).order_by('pk'))
            online_alive = True
        except Exception:
            # it's not worth trying harder
            pass

        user_tpl = get_template('correction/chunk-online-user.html')
        online_users = [(user.pk, user_tpl.render({'user': user}))
                        for user in online_users]

        return {
            'changes': changes,
            'corrections': corrections,
            'online': online_users,
            'online_alive': online_alive,
            'from': latest_date,
            'delay': 1000 * settings.CORRECTION_LIVE_UPDATE_POLL_INTERVAL,
        }
Example #35
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from redis import StrictRedis
from flask_wtf.csrf import CSRFProtect
from flask_session import Session
from config import config_dict

# 1. create the app object
app = Flask(__name__)
# 2. load the configuration
app.config.from_object(config_dict["development"])
# 3. create the database object
db = SQLAlchemy(app)
# 4. create the redis store object
redis_store = StrictRedis(host=config_dict["development"].REDIS_HOST,
                          port=config_dict["development"].REDIS_PORT)
# 5. enable CSRF protection
CSRFProtect(app)
# 6. create the session object
Session(app)
Example #36
import csv
from redis import StrictRedis

reader = csv.reader(open("./presidents.csv"))
header = reader.next()

client = StrictRedis()

for row in reader:
    key = "president:%s" % (row[0], )
    doc = dict(zip(header, row))

    client.set(key, doc)
Example #37
from flask import request
from redis import StrictRedis
from flask_oauthlib.provider import OAuth2Provider
from mongokit import Connection, Document
from bson.objectid import ObjectId
import logging

import settings


logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("oauth")
oauth = OAuth2Provider()
mongo = Connection(host=settings.MONGO_HOST,
                   port=settings.MONGO_PORT)
redisdb = StrictRedis(db=9, decode_responses=True)


# authentication data structures
@mongo.register
class User(Document):
    """A user, or resource owner,
    is usually the registered user on your site."""
    __collection__ = 'users'
    __database__ = 'crandom'
    use_dot_notation = True
    use_schemaless = True

    structure = {
            "username": str,
            "password": str,
Example #38
from flask_wtf import CSRFProtect
# import the dict of configuration objects
from config import config, Config
# import the logging module
import logging
# import the rotating file handler from the logging module
from logging.handlers import RotatingFileHandler
from redis import StrictRedis
# import the wtf extension used to generate csrf_token
from flask_wtf import csrf

# instantiate the sqlalchemy object
db = SQLAlchemy()
# instantiate the redis database, used as a temporary cache for business data
# such as image captchas, SMS verification codes and user info
redis_store = StrictRedis(host=Config.REDIS_HOST,
                          port=Config.REDIS_PORT,
                          decode_responses=True)

# project-wide logging setup
# set the logging level
logging.basicConfig(level=logging.DEBUG)  # debug level
# create the log handler, specifying the log file path, the maximum size of
# each log file and the maximum number of files to keep
file_log_handler = RotatingFileHandler("logs/log",
                                       maxBytes=1024 * 1024 * 100,
                                       backupCount=10)
# log record format: level, source file name, line number, message
formatter = logging.Formatter(
    '%(levelname)s %(filename)s:%(lineno)d %(message)s')
# apply the format to the newly created log handler
file_log_handler.setFormatter(formatter)
# attach the handler to the global logging object (the one the flask app uses)
Example #39
File: helpers.py Project: praiskup/copr
 def get_connection(self):
     return StrictRedis(host=self.host, port=self.port, db=self.db)
Example #40
def work_on_population(redis: StrictRedis,
                       start_time: int,
                       max_runtime_s: int,
                       kill_handler: KillHandler):
    """
    Here the actual sampling happens.
    """

    # set timers
    population_start_time = time()
    cumulative_simulation_time = 0

    # read from pipeline
    pipeline = redis.pipeline()
    # extract bytes
    ssa_b, batch_size_b, all_accepted_b, n_req_b, n_acc_b \
        = (pipeline.get(SSA).get(BATCH_SIZE)
           .get(ALL_ACCEPTED).get(N_REQ).get(N_ACC).execute())

    if ssa_b is None:
        return

    kill_handler.exit = False

    if n_acc_b is None:
        return

    # convert from bytes
    simulate_one, sample_factory = pickle.loads(ssa_b)
    batch_size = int(batch_size_b.decode())
    all_accepted = bool(int(all_accepted_b.decode()))
    n_req = int(n_req_b.decode())

    # notify sign up as worker
    n_worker = redis.incr(N_WORKER)
    logger.info(
        f"Begin population, batch size {batch_size}. "
        f"I am worker {n_worker}")

    # counter for number of simulations
    internal_counter = 0

    # create empty sample
    sample = sample_factory()

    # loop until no more particles required
    while int(redis.get(N_ACC).decode()) < n_req \
            and (not all_accepted or int(redis.get(N_EVAL).decode()) < n_req):
        if kill_handler.killed:
            logger.info(
                f"Worker {n_worker} received stop signal. "
                f"Terminating in the middle of a population "
                f"after {internal_counter} samples.")
            # notify quit
            redis.decr(N_WORKER)
            sys.exit(0)

        # check whether time's up
        current_runtime = time() - start_time
        if current_runtime > max_runtime_s:
            logger.info(
                f"Worker {n_worker} stops during population because "
                f"runtime {current_runtime} exceeds "
                f"max runtime {max_runtime_s}")
            # notify quit
            redis.decr(N_WORKER)
            return

        # increase global number of evaluations counter
        particle_max_id = redis.incr(N_EVAL, batch_size)

        # timer for current simulation until batch_size acceptances
        this_sim_start = time()
        # collect accepted particles
        accepted_samples = []

        # make batch_size attempts
        for n_batched in range(batch_size):
            # increase evaluation counter
            internal_counter += 1
            try:
                # simulate
                new_sim = simulate_one()
                # append to current sample
                sample.append(new_sim)
                # check for acceptance
                if new_sim.accepted:
                    # the order of the IDs is reversed, but this does not
                    # matter. Important is only that the IDs are specified
                    # before the simulation starts

                    # append to accepted list
                    accepted_samples.append(
                        cloudpickle.dumps(
                            (particle_max_id - n_batched, sample)))
                    # initialize new sample
                    sample = sample_factory()
            except Exception as e:
                logger.warning(f"Redis worker number {n_worker} failed. "
                               f"Error message is: {e}")
                # initialize new sample to be sure
                sample = sample_factory()

        # update total simulation-specific time
        cumulative_simulation_time += time() - this_sim_start

        # push to pipeline if at least one sample got accepted
        if len(accepted_samples) > 0:
            # new pipeline
            pipeline = redis.pipeline()
            # update particles counter
            pipeline.incr(N_ACC, len(accepted_samples))
            # note: samples are appended 1-by-1
            pipeline.rpush(QUEUE, *accepted_samples)
            # execute all commands
            pipeline.execute()

    # end of sampling loop

    # notify quit
    redis.decr(N_WORKER)
    kill_handler.exit = True
    population_total_time = time() - population_start_time
    logger.info(
        f"Finished population, did {internal_counter} samples. "
        f"Simulation time: {cumulative_simulation_time:.2f}s, "
        f"total time {population_total_time:.2f}.")
Example #41
    def run(self):
        """
        Called when the process intializes.
        """
        while 1:
            now = time()

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error('skyline can\'t connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
                sleep(10)
                self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH)
                continue

            # Discover unique metrics
            unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))

            # Clear out the count of screen hits from holt-winters
            self.redis_conn.set('holtfalse', int(0))

            if len(unique_metrics) == 0:
                logger.info('no metrics in redis. try adding some - see README')
                sleep(10)
                continue

            # Spawn processes
            pids = []
            for i in range(1, settings.ANALYZER_PROCESSES + 1):
                if i > len(unique_metrics):
                    logger.info('WARNING: skyline is set for more cores than needed.')
                    break

                p = Process(target=self.spin_process, args=(i, unique_metrics))
                pids.append(p)
                p.start()

            # Send wait signal to zombie processes
            for p in pids:
                p.join()

            # Grab data from the queue and populate dictionaries
            exceptions = dict()
            anomaly_breakdown = dict()
            while 1:
                try:
                    key, value = self.anomaly_breakdown_q.get_nowait()
                    if key not in anomaly_breakdown.keys():
                        anomaly_breakdown[key] = value
                    else:
                        anomaly_breakdown[key] += value
                except Empty:
                    break

            while 1:
                try:
                    key, value = self.exceptions_q.get_nowait()
                    if key not in exceptions.keys():
                        exceptions[key] = value
                    else:
                        exceptions[key] += value
                except Empty:
                    break

            # Send alerts
            if settings.ENABLE_ALERTS:
                for alert in settings.ALERTS:
                    for metric in self.anomalous_metrics:
                        if alert[0] in metric[1]:
                            cache_key = 'last_alert.%s.%s' % (alert[1], metric[1])
                            try:
                                last_alert = self.redis_conn.get(cache_key)
                                if not last_alert:
                                    self.redis_conn.setex(cache_key, alert[2], packb(metric[0]))
                                    trigger_alert(alert, metric)

                            except Exception as e:
                                logger.error("couldn't send alert: %s" % e)

            # Write anomalous_metrics to static webapp directory
            filename = path.abspath(path.join(path.dirname(__file__), '..', settings.ANOMALY_DUMP))
            with open(filename, 'w') as fh:
                # Make it JSONP with a handle_data() function
                anomalous_metrics = list(self.anomalous_metrics)
                anomalous_metrics.sort(key=operator.itemgetter(1))
                fh.write('handle_data(%s)' % anomalous_metrics)

            # Log progress
            logger.info('seconds to run    :: %.2f' % (time() - now))
            logger.info('total metrics     :: %d' % len(unique_metrics))
            logger.info('total analyzed    :: %d' % (len(unique_metrics) - sum(exceptions.values())))
            logger.info('secondary screen  :: %s' % self.redis_conn.get('holtfalse'))
            logger.info('total anomalies   :: %d' % len(self.anomalous_metrics))
            logger.info('exception stats   :: %s' % exceptions)
            logger.info('anomaly breakdown :: %s' % anomaly_breakdown)
            self.redis_conn.set('holtfalse', int(0))

            # Log to Graphite
            self.send_graphite_metric('skyline.analyzer.run_time', '%.2f' % (time() - now))
            self.send_graphite_metric('skyline.analyzer.total_analyzed', '%.2f' % (len(unique_metrics) - sum(exceptions.values())))

            # Check canary metric
            raw_series = self.redis_conn.get(settings.FULL_NAMESPACE + settings.CANARY_METRIC)
            if raw_series is not None:
                unpacker = Unpacker(use_list = False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)
                time_human = (timeseries[-1][0] - timeseries[0][0]) / 3600
                projected = 24 * (time() - now) / time_human

                logger.info('canary duration   :: %.2f' % time_human)
                self.send_graphite_metric('skyline.analyzer.duration', '%.2f' % time_human)
                self.send_graphite_metric('skyline.analyzer.projected', '%.2f' % projected)

            # Reset counters
            self.anomalous_metrics[:] = []

            # Sleep if it went too fast
            if time() - now < 5:
                logger.info('sleeping due to low run time...')
                sleep(10)
Example #42
def get_cache(strict=False):
    redis_url = current_app.config.get("REDIS_URL")
    if strict:
        return StrictRedis.from_url(redis_url)
    return from_url(redis_url)
Example #43
 def init_db():
     app.redis = StrictRedis.from_url(app.config['REDIS_URL'])
Example #44
class Analyzer(Thread):
    def __init__(self, parent_pid):
        """
        Initialize the Analyzer
        """
        super(Analyzer, self).__init__()
        self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH)
        self.daemon = True
        self.parent_pid = parent_pid
        self.current_pid = getpid()
        self.anomalous_metrics = Manager().list()
        self.exceptions_q = Queue()
        self.anomaly_breakdown_q = Queue()

    def check_if_parent_is_alive(self):
        """
        Self explanatory
        """
        try:
            kill(self.current_pid, 0)
            kill(self.parent_pid, 0)
        except:
            exit(0)

    def send_graphite_metric(self, name, value):
        if settings.GRAPHITE_HOST != '':
            sock = socket.socket()
            sock.connect((settings.GRAPHITE_HOST, settings.CARBON_PORT))
            sock.sendall('%s %s %i\n' % (name, value, time()))
            sock.close()
            return True

        return False

    def spin_process(self, i, unique_metrics):
        """
        Assign a bunch of metrics for a process to analyze.
        """
        # Discover assigned metrics
        keys_per_processor = int(ceil(float(len(unique_metrics)) / float(settings.ANALYZER_PROCESSES)))
        if i == settings.ANALYZER_PROCESSES:
            assigned_max = len(unique_metrics)
        else:
            assigned_max = i * keys_per_processor
        assigned_min = assigned_max - keys_per_processor
        assigned_keys = range(assigned_min, assigned_max)

        # Compile assigned metrics
        assigned_metrics = [unique_metrics[index] for index in assigned_keys]

        # Check if this process is unnecessary
        if len(assigned_metrics) == 0:
            return

        # Multi get series
        raw_assigned = self.redis_conn.mget(assigned_metrics)

        # Make process-specific dicts
        exceptions = defaultdict(int)
        anomaly_breakdown = defaultdict(int)

        # Distill timeseries strings into lists
        for i, metric_name in enumerate(assigned_metrics):
            self.check_if_parent_is_alive()

            # Static exclusion
            # TODO: Make this a configuration item
            if (re.search('veritas', metric_name) and not re.search('yen', metric_name)):
                exceptions['VeritasSkipped'] += 1
                continue

            try:
                raw_series = raw_assigned[i]
                unpacker = Unpacker(use_list = False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)

                anomalous, ensemble, datapoint = run_selected_algorithm(timeseries, metric_name)

                # If it's anomalous, add it to list
                if anomalous:
                    base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
                    metric = [datapoint, base_name]
                    self.anomalous_metrics.append(metric)

                    # Get the anomaly breakdown - who returned True?
                    for index, value in enumerate(ensemble):
                        if value:
                            algorithm = settings.ALGORITHMS[index]
                            anomaly_breakdown[algorithm] += 1

            # It could have been deleted by the Roomba
            except TypeError:
                exceptions['DeletedByRoomba'] += 1
            except TooShort:
                exceptions['TooShort'] += 1
            except Stale:
                exceptions['Stale'] += 1
            except Boring:
                exceptions['Boring'] += 1
            except:
                exceptions['Other'] += 1
                logger.info(traceback.format_exc())

        # Add values to the queue so the parent process can collate
        for key, value in anomaly_breakdown.items():
            self.anomaly_breakdown_q.put((key, value))

        for key, value in exceptions.items():
            self.exceptions_q.put((key, value))

    def run(self):
        """
        Called when the process intializes.
        """
        while 1:
            now = time()

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error('skyline can\'t connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
                sleep(10)
                self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH)
                continue

            # Discover unique metrics
            unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))

            # Clear out the count of screen hits from holt-winters
            self.redis_conn.set('holtfalse', int(0))

            if len(unique_metrics) == 0:
                logger.info('no metrics in redis. try adding some - see README')
                sleep(10)
                continue

            # Spawn processes
            pids = []
            for i in range(1, settings.ANALYZER_PROCESSES + 1):
                if i > len(unique_metrics):
                    logger.info('WARNING: skyline is set for more cores than needed.')
                    break

                p = Process(target=self.spin_process, args=(i, unique_metrics))
                pids.append(p)
                p.start()

            # Send wait signal to zombie processes
            for p in pids:
                p.join()

            # Grab data from the queue and populate dictionaries
            exceptions = dict()
            anomaly_breakdown = dict()
            while 1:
                try:
                    key, value = self.anomaly_breakdown_q.get_nowait()
                    if key not in anomaly_breakdown.keys():
                        anomaly_breakdown[key] = value
                    else:
                        anomaly_breakdown[key] += value
                except Empty:
                    break

            while 1:
                try:
                    key, value = self.exceptions_q.get_nowait()
                    if key not in exceptions.keys():
                        exceptions[key] = value
                    else:
                        exceptions[key] += value
                except Empty:
                    break

            # Send alerts
            if settings.ENABLE_ALERTS:
                for alert in settings.ALERTS:
                    for metric in self.anomalous_metrics:
                        if alert[0] in metric[1]:
                            cache_key = 'last_alert.%s.%s' % (alert[1], metric[1])
                            try:
                                last_alert = self.redis_conn.get(cache_key)
                                if not last_alert:
                                    self.redis_conn.setex(cache_key, alert[2], packb(metric[0]))
                                    trigger_alert(alert, metric)

                            except Exception as e:
                                logger.error("couldn't send alert: %s" % e)

            # Write anomalous_metrics to static webapp directory
            filename = path.abspath(path.join(path.dirname(__file__), '..', settings.ANOMALY_DUMP))
            with open(filename, 'w') as fh:
                # Make it JSONP with a handle_data() function
                anomalous_metrics = list(self.anomalous_metrics)
                anomalous_metrics.sort(key=operator.itemgetter(1))
                fh.write('handle_data(%s)' % anomalous_metrics)

            # Log progress
            logger.info('seconds to run    :: %.2f' % (time() - now))
            logger.info('total metrics     :: %d' % len(unique_metrics))
            logger.info('total analyzed    :: %d' % (len(unique_metrics) - sum(exceptions.values())))
            logger.info('secondary screen  :: %s' % self.redis_conn.get('holtfalse'))
            logger.info('total anomalies   :: %d' % len(self.anomalous_metrics))
            logger.info('exception stats   :: %s' % exceptions)
            logger.info('anomaly breakdown :: %s' % anomaly_breakdown)
            self.redis_conn.set('holtfalse', int(0))

            # Log to Graphite
            self.send_graphite_metric('skyline.analyzer.run_time', '%.2f' % (time() - now))
            self.send_graphite_metric('skyline.analyzer.total_analyzed', '%.2f' % (len(unique_metrics) - sum(exceptions.values())))

            # Check canary metric
            raw_series = self.redis_conn.get(settings.FULL_NAMESPACE + settings.CANARY_METRIC)
            if raw_series is not None:
                unpacker = Unpacker(use_list = False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)
                time_human = (timeseries[-1][0] - timeseries[0][0]) / 3600
                projected = 24 * (time() - now) / time_human

                logger.info('canary duration   :: %.2f' % time_human)
                self.send_graphite_metric('skyline.analyzer.duration', '%.2f' % time_human)
                self.send_graphite_metric('skyline.analyzer.projected', '%.2f' % projected)

            # Reset counters
            self.anomalous_metrics[:] = []

            # Sleep if it went too fast
            if time() - now < 5:
                logger.info('sleeping due to low run time...')
                sleep(10)
Example #45
class BeaconProcessor(object):

    redis = StrictRedis(**redisConfig)

    rawQueueOGN = Queue()
    rawQueueFLR = Queue()
    rawQueueICA = Queue()
    queues = (rawQueueOGN, rawQueueFLR, rawQueueICA)
    queueKeys = ('rawQueueOGN', 'rawQueueFLR', 'rawQueueICA')

    workers = list()

    def __init__(self):

        # restore unprocessed data from redis:
        numRead = 0
        for key, queue in zip(self.queueKeys, self.queues):
            while True:
                item = self.redis.lpop(key)
                if not item:
                    break
                queue.put(item)
                numRead += 1
        print(f"[INFO] Loaded {numRead} raw message(s) from redis.")

        self.dbThread = DbThread(dbConnectionInfo)
        self.dbThread.start()

        for i, queue in enumerate(self.queues):
            rawWorker = RawWorker(index=i,
                                  dbThread=self.dbThread,
                                  rawQueue=queue)
            rawWorker.start()
            self.workers.append(rawWorker)

    def stop(self):
        for worker in self.workers:
            worker.stop()

        # store all unprocessed data into redis:
        n = 0
        for key, queue in zip(self.queueKeys, self.queues):
            n += queue.qsize()
            for item in list(queue.queue):
                self.redis.rpush(key, item)
        print(f"[INFO] Flushed {n} rawQueueX items into redis.")

        self.dbThread.stop()

        print('[INFO] BeaconProcessor terminated.')

    startTime = time.time()
    numEnquedTasks = 0

    def _printStats(self):
        now = time.time()
        tDiff = now - self.startTime
        if tDiff >= 60:
            numTasksPerMin = self.numEnquedTasks / tDiff * 60
            numQueuedTasks = self.rawQueueOGN.qsize() + self.rawQueueFLR.qsize(
            ) + self.rawQueueICA.qsize()

            print(
                f"Beacon rate: {numTasksPerMin:.0f}/min. {numQueuedTasks} queued."
            )

            self.numEnquedTasks = 0
            self.startTime = now

    def enqueueForProcessing(self, raw_message: str):
        self._printStats()

        prefix = raw_message[:3]
        if prefix == 'OGN':
            self.rawQueueOGN.put(raw_message)
        elif prefix == 'FLR':
            self.rawQueueFLR.put(raw_message)
        else:  # 'ICA'
            self.rawQueueICA.put(raw_message)

        self.numEnquedTasks += 1
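Note that StrictRedis.lpop returns bytes unless redisConfig sets decode_responses=True, so items restored in __init__ above may no longer match string prefixes such as 'OGN' in enqueueForProcessing. A small self-contained sketch of the restore step that normalizes values to str:

from queue import Queue
from redis import StrictRedis

def restore_queue(redis: StrictRedis, key: str, queue: Queue) -> int:
    # Drain a Redis list back into an in-memory Queue, decoding bytes to str
    # so later checks like raw_message[:3] == 'OGN' keep working.
    num_read = 0
    while True:
        item = redis.lpop(key)
        if item is None:
            break
        if isinstance(item, bytes):
            item = item.decode('utf-8')
        queue.put(item)
        num_read += 1
    return num_read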
Example #46
0
    if logger_name not in loggers:
        loggers[logger_name] = {'level': 'INFO'}

LOGGING = {
    'version': 1,
    'incremental': False,
    'disable_existing_loggers': False,
    'filters': {},
    'formatters': {
        'default': {
            'format':
            '[%(asctime)s %(levelname)-5.5s %(threadName)s %(name)s] %(message)s',
        }
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'stream': 'ext://sys.stdout',
            'formatter': 'default',
        },
    },
    'loggers': loggers
}

SPINACH_BROKER = RedisBroker(
    redis=StrictRedis.from_url(config('REDIS_SPINACH_URL', default='redis://'),
                               **recommended_socket_opts))
SPINACH_NAMESPACE = 'feedsubs'
SPINACH_CLEAR_SESSIONS_PERIODICITY = timedelta(weeks=1)
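recommended_socket_opts comes from elsewhere in this settings module; a plausible sketch of such a dict, using standard redis-py connection keyword arguments (the concrete values are assumptions, not the project's):

recommended_socket_opts = {
    'socket_timeout': 5,             # give up on a blocking command after 5 s
    'socket_connect_timeout': 5,     # give up on the TCP connect after 5 s
    'socket_keepalive': True,        # keep idle connections alive
    'health_check_interval': 30,     # ping the server if a connection sat idle this long
}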
Example #47
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from redis import StrictRedis

r = StrictRedis(host='127.0.0.1', port=6379)


def test():
    key = 'testkey'
    r.zadd(key, 0, 0)
    r.zadd(key, 1, '1')
    r.zadd(key, 2, '2')
    r.zadd(key, 3, '3')
    print(r.zrange(key, -1, -1)[0])
    print(type(r.zrange(key, -1, -1)[0]))

    print(r.zrange(key, 0, 0)[0])
    print(type(r.zrange(key, 0, 0)[0]))

    print(r.zcard(key))
    print(type(r.zcard(key)))


if __name__ == '__main__':
    test()
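Note that zadd(key, score, member) is the legacy redis-py 2.x calling convention; redis-py 3.0 and later expect a mapping of members to scores instead. Under the newer client the calls above become:

# redis-py >= 3.0
r.zadd(key, {'0': 0, '1': 1, '2': 2, '3': 3})
print(r.zrange(key, -1, -1)[0])   # highest-scored member, returned as bytes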
Example #48
0
class RawWorker(Thread):

    redis = StrictRedis(**redisConfig)

    def __init__(self, index: int, dbThread: DbThread, rawQueue: Queue):
        super(RawWorker, self).__init__()

        self.index = index
        self.dbThread = dbThread
        self.rawQueue = rawQueue

        self.doRun = True

    def __del__(self):
        self.doRun = False

    def stop(self):
        self.doRun = False

    def run(self):
        print(f"[INFO] Starting worker #{self.index}")
        while self.doRun:
            try:
                raw_message = self.rawQueue.get(block=False)
                if raw_message:
                    self._processMessage(raw_message)
            except Empty:
                time.sleep(1)  # ~ thread.yield()

        print(f"[INFO] Worker #{self.index} terminated.")

    def _saveToRedis(self, statusKey: str, status: Status):
        self.redis.set(statusKey, str(status))
        self.redis.expire(statusKey, REDIS_RECORD_EXPIRATION)

    def _processMessage(self, raw_message: str):
        beacon = None
        try:
            beacon = parse(raw_message)
            if not beacon or 'beacon_type' not in beacon.keys(
            ) or beacon['beacon_type'] != 'aprs_aircraft':
                return

        except ParseError as e:
            # print('[ERROR] when parsing a beacon: {}'.format(e.message))
            # print("Failed BEACON:", raw_message)
            return

        except Exception as e:
            # print('[ERROR] {}'.format(e))
            # if beacon:
            #     print("Failed BEACON:", beacon)
            return

        # we are not interested in paragliders, balloons, UAVs, static objects and others:
        aircraftType = beacon['aircraft_type']
        if aircraftType in [4, 6, 7, 13, 11, 15, 16]:
            return

        address = beacon['address']
        groundSpeed = beacon['ground_speed']
        ts = round(beacon['timestamp'].timestamp())  # [s]
        # print(f"[INFO] {address} gs: {groundSpeed:.0f}")

        # print('[DEBUG] Gps H:', beacon['gps_quality']['horizontal'])

        currentStatus: Status = Status(ts=ts)
        currentStatus.s = 0 if groundSpeed < SPEED_THRESHOLD else 1  # 0 = on ground, 1 = airborne, -1 = unknown
        # TODO add AGL check (?)
        # TODO threshold by aircraftType

        prevStatus: Status = None
        statusKey = f"{address}-status"
        ps = self.redis.get(statusKey)
        if ps:
            try:
                prevStatus = Status.parse(ps)
            except ValueError as e:
                print('[ERROR] when parsing prev. status: ', e)

        if not prevStatus:  # we have no prior information
            self._saveToRedis(statusKey, currentStatus)
            return

        if currentStatus.s != prevStatus.s:
            addressType = beacon['address_type']
            aircraftType = beacon['aircraft_type']
            lat = beacon['latitude']
            lon = beacon['longitude']

            icaoLocation = AirfieldManager().getNearest(lat, lon)
            if not icaoLocation:
                return

            event = 'L' if currentStatus.s == 0 else 'T'  # L = landing, T = take-off
            flightTime = 0
            if event == 'L':
                flightTime = currentStatus.ts - prevStatus.ts  # [s]
                if flightTime < 60:
                    return

            self._saveToRedis(statusKey, currentStatus)

            dt = datetime.fromtimestamp(ts)
            dtStr = dt.strftime('%H:%M:%S')
            print(
                f"[INFO] {dtStr}; {icaoLocation}; {address}; {event}; {flightTime}"
            )

            strSql = f"INSERT INTO logbook_events " \
                f"(ts, address, address_type, aircraft_type, event, lat, lon, location_icao, flight_time) " \
                f"VALUES " \
                f"({ts}, '{address}', {addressType}, '{aircraftType}', " \
                f"'{event}', {lat:.5f}, {lon:.5f}, '{icaoLocation}', {flightTime});"

            # print('strSql:', strSql)

            self.dbThread.addStatement(strSql)
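The Status class used above (constructed with Status(ts=...), parsed with Status.parse() and stored via str()) is defined elsewhere in the project. A minimal compatible sketch, assuming a simple 'ts;s' string serialization (the real format is not shown in the snippet):

class Status:

    def __init__(self, ts: int = 0, s: int = -1):
        self.ts = ts   # unix timestamp [s]
        self.s = s     # 0 = on ground, 1 = airborne, -1 = unknown

    def __str__(self):
        return f"{self.ts};{self.s}"

    @staticmethod
    def parse(raw) -> 'Status':
        if isinstance(raw, bytes):
            raw = raw.decode('utf-8')
        ts, s = raw.split(';')
        return Status(ts=int(ts), s=int(s))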
Example #49
0
    @contextmanager
    def auto_commit(self):
        try:
            yield
            self.session.commit()
        except Exception:
            self.session.rollback()
            raise SaveObjectException()


# MySQL database
db = SQLAlchemy()

t_redis = StrictRedis(host=REDIS_HOST,
                      port=REDIS_PORT,
                      db=REDIS_DB,
                      password=REDIS_PASSWORD,
                      decode_responses=True)

# cache, backed by Redis
cache = Cache(
    config={
        'CACHE_TYPE': 'redis',
        'CACHE_REDIS_HOST': REDIS_HOST,
        'CACHE_REDIS_PORT': REDIS_PORT,
        'CACHE_REDIS_PASSWORD': REDIS_PASSWORD,
        'CACHE_REDIS_DB': REDIS_DB
    })


class EntityModel(db.Model):
    pass  # model body not shown in this snippet


def unpack_payload(packed):  # function name assumed; its defining line is missing from the snippet
    # tail of the packed struct:
    #16 uint8_t secs;
    #17 uint8_t sats;
    #18 char[] padding;
    fields = ['lat', 'lon', 'alt', 'i', 'hour', 'mins', 'secs', 'sats']
    unpacked_payload = list(struct.unpack('<iiiHBBBB2s', packed))
    payload = dict(zip(fields, unpacked_payload))
    payload['lat'] /= 10000000.0
    payload['lon'] /= 10000000.0
    payload['alt'] /= 1000.0
    return payload
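For reference, a packed payload matching the '<iiiHBBBB2s' layout could be produced like this for testing (the values are arbitrary; lat/lon are scaled by 1e7 and alt by 1000, mirroring the divisions above):

import struct

packed = struct.pack('<iiiHBBBB2s',
                     int(50.0795386 * 10000000),   # lat
                     int(14.4272730 * 10000000),   # lon
                     int(312.5 * 1000),            # alt [mm]
                     42, 12, 34, 56, 9,            # i, hour, mins, secs, sats
                     b'\x00\x00')                  # padding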


if __name__ == '__main__':

    r = StrictRedis(host=s.REDIS_HOST, port=s.REDIS_PORT, db=s.REDIS_DB)
    try:
        if r.ping():
            print "Redis connected."
    except ConnectionError:
        "Error: Redis server not available."

    msg = r.get('last_msg')
    if not msg:
        last_msg = None
    else:
        last_msg = json.loads(msg)

    p = r.pubsub()
    p.subscribe('dashboard_messages')
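The snippet ends right after subscribing; consuming the channel would typically look like the sketch below (not part of the original):

for message in p.listen():
    # the first item is the 'subscribe' confirmation; real payloads have type 'message'
    if message['type'] != 'message':
        continue
    last_msg = json.loads(message['data'])
    r.set('last_msg', message['data'])
    print("Received:", last_msg)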
Example #51
0
MONGOCLIENT = MongoClient(MONGO_URI, 27017, serverSelectionTimeoutMS=1)
MONGO = MONGOCLIENT.userbot


def is_mongo_alive():
    try:
        MONGOCLIENT.server_info()
    except BaseException:
        return False
    return True


# Init Redis
# Redis will be hosted inside the docker container that hosts the bot
# We need redis for just caching, so we just leave it to non-persistent
REDIS = StrictRedis(host='localhost', port=6379, db=0)


def is_redis_alive():
    try:
        REDIS.ping()
        return True
    except BaseException:
        return False


# Setting Up CloudMail.ru and MEGA.nz extractor binaries,
# and giving them correct perms to work properly.
if not os.path.exists('bin'):
    os.mkdir('bin')
Example #52
0
File: main.py  Project: mike-live/DashExp
import dash_devices
import dash_html_components as html
import multiprocessing as mp
from dash_devices.dependencies import Input, Output
from redis import StrictRedis
import time

app = dash_devices.Dash(__name__)
storage = StrictRedis(host='localhost', port=6379)

pubsub = storage.pubsub()


def task(*args):
    print('task - started')
    storage['task-info'] = 'startTask'
    time.sleep(3)
    storage['task-info'] = 'firstStage'
    time.sleep(3)
    storage['task-info'] = 'secondStage'
    time.sleep(3)
    storage['task-info'] = 'Done'

    print('task - ended')


class Example:
    def __init__(self, app):
        self.app = app

        self.app.layout = html.Div([
Example #53
0
    def setUp(self):
        super(TestWebHooks, self).setUp()
        self.connection = StrictRedis()
        self.connection.flushall()
Example #54
0
# Redis Server Options
DOTENV_FILE = os.path.join(os.getcwd(), 'chatbot', '.env')
env_config = Config(RepositoryEnv(DOTENV_FILE))

HOST = env_config.get('REDIS_SERVER_HOST')

try:
    PASSWORD = env_config.get('REDIS_SERVER_PASSWORD')
except UndefinedValueError:
    PASSWORD = None

PORT = env_config.get('REDIS_SERVER_PORT')

if PASSWORD is None:
    REDIS_CONNECTION = StrictRedis(host=HOST, port=PORT)
else:
    REDIS_CONNECTION = StrictRedis(host=HOST, password=PASSWORD, port=PORT)

try:
    CHATBOX_DEMO_APPLICATION = env_config.get('CHATBOX_DEMO_APPLICATION',
                                              cast=bool)
except UndefinedValueError:
    CHATBOX_DEMO_APPLICATION = False

try:
    USE_CELERY = env_config.get('USE_CELERY', cast=bool)
except UndefinedValueError:
    USE_CELERY = False

Example #55
0
def make_redis():
    return StrictRedis(host=config.REDIS_HOST, port=config.REDIS_PORT)
Example #56
0
import json
import logging

from azure.eventhub import EventHubProducerClient
from flask import Flask
from flask_restful import Api
from redis import StrictRedis

with open('./config.json', 'r') as f:
    config = json.load(f)

try:
    redis_args = config["redis"]
    event_hub_args = config["event_hub"]
except KeyError as error:
    logging.error("Config error: missing key %s in config.json", error)
    raise

# Main flask app
app = Flask(__name__)

# flask-restful api
api = Api(app)

# redis client
my_redis = StrictRedis(**redis_args)

# Azure Event hub client
event_hub_client = EventHubProducerClient.from_connection_string(**event_hub_args)
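A config.json compatible with the keys read above could be generated like this; the values are placeholders, the event_hub keys mirror EventHubProducerClient.from_connection_string's conn_str / eventhub_name parameters, and the redis block is passed straight to StrictRedis:

import json

sample_config = {
    "redis": {"host": "localhost", "port": 6379, "db": 0},
    "event_hub": {
        "conn_str": "Endpoint=sb://<namespace>.servicebus.windows.net/;"
                    "SharedAccessKeyName=<key-name>;SharedAccessKey=<key>",
        "eventhub_name": "<hub-name>",
    },
}
with open('./config.json', 'w') as f:
    json.dump(sample_config, f, indent=2)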
Example #57
0
    """

    args = docopt(__docopt__, version="0.3.0")

    startup_nodes = [{"host": args["--host"], "port": args["--port"]}]

    if not args["--nocluster"]:
        from rediscluster import StrictRedisCluster
        rc = StrictRedisCluster(startup_nodes=startup_nodes,
                                max_connections=32,
                                socket_timeout=0.1,
                                decode_responses=True)
    else:
        from redis import StrictRedis
        rc = StrictRedis(host=args["--host"],
                         port=args["--port"],
                         socket_timeout=0.1,
                         decode_responses=True)

    if args["--timeit"]:
        test_itterstions = [
            5000,
            10000,
            20000,
        ]

        if args["--pipeline"]:
            for itterations in test_itterstions:
                timeit_pipeline(rc, itterations=itterations)
        else:
            for itterations in test_itterstions:
                timeit(rc, itterations=itterations)
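timeit and timeit_pipeline are defined elsewhere in that script; a minimal sketch of what the pipelined benchmark might do (an assumed implementation, keeping the itterations keyword spelling used by the call sites above):

import time

def timeit_pipeline(rc, itterations=50000):
    # Batch SET commands through a pipeline and report the elapsed time.
    t0 = time.time()
    pipe = rc.pipeline(transaction=False)
    for i in range(itterations):
        pipe.set('benchmark:%d' % i, i)
        if i % 1000 == 0:
            pipe.execute()   # flush in chunks to bound client-side memory
    pipe.execute()
    print('%d pipelined SETs in %.2fs' % (itterations, time.time() - t0))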
Example #58
0
class TestWebHooks(Test):
    def setUp(self):
        super(TestWebHooks, self).setUp()
        self.connection = StrictRedis()
        self.connection.flushall()

    @with_context
    @patch('pybossa.jobs.requests')
    def test_webhooks(self, mock):
        """Test WEBHOOK works."""
        mock.post.return_value = True
        err_msg = "The webhook should return True from patched method"
        assert webhook('url'), err_msg
        err_msg = "The post method should be called"
        assert mock.post.called, err_msg

    @with_context
    @patch('pybossa.jobs.requests')
    def test_webhooks_without_url(self, mock):
        """Test WEBHOOK without url works."""
        mock.post.return_value = True
        err_msg = "The webhook should return False"
        assert webhook(None) is False, err_msg

    @with_context
    @patch('pybossa.model.event_listeners.webhook_queue', new=queue)
    def test_trigger_webhook_without_url(self):
        """Test WEBHOOK is triggered without url."""
        project = ProjectFactory.create()
        task = TaskFactory.create(project=project, n_answers=1)
        TaskRunFactory.create(project=project, task=task)
        assert queue.enqueue.called is False, queue.enqueue.called
        queue.reset_mock()

    @with_context
    @patch('pybossa.model.event_listeners.webhook_queue', new=queue)
    def test_trigger_webhook_with_url_not_completed_task(self):
        """Test WEBHOOK is not triggered for uncompleted tasks."""
        import random
        project = ProjectFactory.create()
        task = TaskFactory.create(project=project)
        for i in range(1, random.randrange(2, 5)):
            TaskRunFactory.create(project=project, task=task)
        assert queue.enqueue.called is False, queue.enqueue.called
        assert task.state != 'completed'
        queue.reset_mock()

    @with_context
    @patch('pybossa.model.event_listeners.webhook_queue', new=queue)
    def test_trigger_webhook_with_url(self):
        """Test WEBHOOK is triggered with url."""
        url = 'http://server.com'
        project = ProjectFactory.create(webhook=url, )
        task = TaskFactory.create(project=project, n_answers=1)
        TaskRunFactory.create(project=project, task=task)
        payload = dict(
            event='task_completed',
            project_short_name=project.short_name,
            project_id=project.id,
            task_id=task.id,
            fired_at=datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
        assert queue.enqueue.called
        queue.enqueue.assert_called_with(webhook, url, payload)
        queue.reset_mock()
Example #59
0
def register_redis(app):
    redis_data = StrictRedis(db=app.config['CACHE_REDIS_DB'],
                             password=app.config['CACHE_REDIS_PASSWORD'],
                             decode_responses=True)
    return redis_data
Example #60
0
File: helpers.py  Project: praiskup/copr
def get_redis_connection():
    """
    Creates a connection to Redis. For now we use the default instance at localhost, so no config is needed.
    """
    return StrictRedis()