Example 1
def best_guess_answer(words, no_of_answers=3):
    """ Find the best answers from autocompletes 'words' - It is assumed words is correct """
    keys = [SKEY_DOCS_PREFIX+word for word in words]
    
    keys.append('%s' % SEARCH_RANKING)
    logger.info('best_guess_answer, keys: %s' % keys)
    logger.info(purple("%s") % r.zrange('tmp-df', 0, -1))
    try:
        r.zinterstore('tmp-df', keys)
        logger.info(purple("%s") % r.zrange('tmp-df', 0, -1))
    except Exception:
        logger.exception('zinterstore failed for keys: %s' % keys)
Example 2
def dump_redis():
    """Dump all Redis data for Search App in the logger. """
    logger.info(sgreen('==================== REDIS DUMP FOR SEARCH APP ===================='))
    dumpStr = '\n===== Autocomplete =====\n'
    dumpStr += purple("%s\n") % r.zrange(ZKEY_AUTOCOMPLETE, 0, -1)  # Return a range of members in a sorted set
    dumpStr += '\n===== Complete words/phrase score based on autocomplete =====\n'
    dumpStr += purple("%s\n") % r.zrangebyscore(ZKEY_AUTOCOMPLETE_SCORE, float("-inf"), float("inf"))
    dumpStr += "\n===== Any Prefixs with key '%s' =====" % SKEY_AUTOCOMPLETE
    for key in r.keys(SKEY_AUTOCOMPLETE+'*'):
        dumpStr += green('\nKey: %s\nValue: %s\n' % (key, r.sunion(key)))
    dumpStr += "models:"
    for key in r.keys(SKEY_MODELS+"*"):
        dumpStr += green('\nKey: %s\nValue: %s\n' % (key, r.sunion(key)))
    logger.info(dumpStr)
    logger.info('==================== END OF REDIS DUMP ====================')
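
One caveat on the dump above: KEYS is O(N) over the whole keyspace and blocks the server, so it is only appropriate for debugging. A sketch of the non-blocking alternative using redis-py's scan_iter, same pattern otherwise:

# SCAN-based, non-blocking version of the prefix walk above.
for key in r.scan_iter(match=SKEY_AUTOCOMPLETE + '*'):
    dumpStr += green('\nKey: %s\nValue: %s\n' % (key, r.sunion(key)))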
Example 3
def log_dump_redis():
    """ Dump All Redis Data for Search App."""
    logger.info(sgreen('########################## REDIS DUMP FOR SEARCH APP ################################'))
    dumpStr = '\n###### Autocomplete ######\n'
    dumpStr += purple("%s") % r.zrange(ZKEY_AUTOCOMPLETE, 0, -1)  # Return a range of members in a sorted set, by index
    dumpStr += "\n###### Any Prefixs with key '%s' ######" % SKEY_DOCS_PREFIX
    for key in r.keys(SKEY_DOCS_PREFIX+'*'):
        set_value = r.sunion(key)  # fetch once and reuse below
        dumpStr += green('\nKey: %s\n\t%s' % (key, set_value))
        for redis_value in set_value:
            dumpStr += "\nKey: %s\n\t%s" % (SKEY_MODELS + redis_value, r.sunion(SKEY_MODELS + redis_value))

    logger.info(dumpStr)

    logger.info("For Prefix Key: '%s' Redis has the following keys:\n%s" % (SKEY_DOCS_PREFIX, r.keys(SKEY_DOCS_PREFIX+'*')))
    logger.info('############################## END OF REDIS DUMP ####################################')
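
To make the two-level walk above concrete, here is a hypothetical layout it would traverse; the keys and values are invented for illustration, with 'docs:' and 'models:' standing in for SKEY_DOCS_PREFIX and SKEY_MODELS.

import redis

r = redis.Redis(decode_responses=True)

# Hypothetical two-level index: a prefix set maps a search prefix to
# document ids, and each document id has its own set of models.
r.sadd('docs:red', 'doc1', 'doc2')    # stands in for SKEY_DOCS_PREFIX + prefix
r.sadd('models:doc1', 'Article')      # stands in for SKEY_MODELS + doc id
r.sadd('models:doc2', 'Comment')

for key in r.scan_iter(match='docs:*'):
    for doc_id in r.sunion(key):
        print(key, doc_id, r.sunion('models:' + doc_id))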
Example 4
    def _runScheduledTask(self):
        """ Process new stat value A
            Add new value into buffer. If full need to consolidate values first. """
        logger.info("%s" % purple("=" * 100))
        logger.info("This is a schedule run task for bucket: %s for keys: %s" % (purple(self.name), self.stat_keys))
        for key in self.stat_keys:
            count = r.hget(STAT_KEY, key)

            rrd_data = r.zrange("%s:%s:rrd" % (self.name, key), 0, -1)
            logger.error("ZRANGE 0 -1     %s" % rrd_data)
            logger.warn(
                "ZRANGE -INF +INF %s" % r.zrangebyscore("%s:%s:rrd" % (self.name, key), float("-inf"), float("inf"))
            )
            length_rrd = len(rrd_data)

            if length_rrd == self.rows:
                # Buffer is full: append the new value, then consolidate the
                # two oldest entries into one to keep the buffer size fixed.
                timestamp = int(time.time())
                # redis-py >= 3.0 takes a {member: score} mapping.
                r.zadd("%s:%s:rrd" % (self.name, key), {"%s:%s" % (timestamp, count): timestamp})
                logger.debug("zrange 0 1 %s" % r.zrange("%s:%s:rrd" % (self.name, key), 0, 1))
                data1, data2 = r.zrange("%s:%s:rrd" % (self.name, key), 0, 1)
                r.zremrangebyrank("%s:%s:rrd" % (self.name, key), 0, 1)
                timestamp_consolidated, data_consolidated = self.aggregrate(data1, data2)
                r.zadd(
                    "%s:%s:rrd" % (self.name, key),
                    {"%s:%s" % (timestamp_consolidated, data_consolidated): timestamp_consolidated},
                )
                logger.debug("%s" % r.zrange("%s:%s:rrd" % (self.name, key), 0, -1))
            elif length_rrd > self.rows:
                logger.error(
                    "RRD data for %s has length %s, greater than bucket size %s. This should never happen. Removing excess data ..."
                    % (self.name, length_rrd, self.rows)
                )
                r.zremrangebyrank("%s:%s:rrd" % (self.name, key), 0, length_rrd - self.rows - 1)
            else:
                # Buffer is not yet full: just append the new stat value.
                timestamp = int(time.time())
                r.zadd("%s:%s:rrd" % (self.name, key), {"%s:%s" % (timestamp, count): timestamp})
                logger.debug("%s" % r.zrange("%s:%s:rrd" % (self.name, key), 0, -1))

        logger.info("%s" % purple("=" * 100))