def schedule_row_cache(row_id, delay):
    """Schedule a data row for caching / periodic refresh.

    Args:
        row_id (str): unique resource id, e.g. 'users:1:posts:1'
        delay (int): refresh interval in seconds, e.g. 10
    """
    # Mapping form is the redis-py 3.x zadd() signature, consistent with
    # the other zadd() calls in this module (load_ip_to_redis, redis_add_word);
    # the old positional (key, member, score) form breaks on redis-py 3+.
    redis_db.zadd('delay:', {row_id: delay})          # per-row refresh delay
    redis_db.zadd('schedule:', {row_id: time.time()})  # next refresh timestamp
def load_ip_to_redis(filename=IP_TEXT, flush=True):
    """Parse an ip CSV file and load it into redis.

    Consecutive rows sharing one location are collapsed: for each run the
    last row's score is stored under 'ip_location:' (zset) and the
    'country:province:city' string under 'location:' (hash).

    Args:
        filename (str): path to the ip data file (csv)
        flush (bool): when True, drop previously loaded data first
    """
    lines = count_lines(filename)
    if flush:
        redis_db.delete('ip_location:')
        redis_db.delete('location:')
    # mode 'r': the file is only read — the original 'r+' requested write
    # access for no reason (and fails on read-only files).
    with open(filename, encoding='utf-8', mode='r') as fp:
        ip_mapping = {}
        location_mapping = {}
        temp_location = None
        temp_score = None
        temp_info = None
        count = 0
        lineno = 0
        for lineno, line in enumerate(fp):
            if not line:
                continue
            pieces = line.split(',')
            score = pieces[1]
            country, province, city = pieces[4:7]
            # prefer the most specific granularity available
            location = city or province or country
            if temp_location and location != temp_location:
                ip_mapping['%s_%s' % (temp_location, count)] = temp_score
                location_mapping[temp_location] = temp_info
                count += 1
                # flush to redis in batches of 10000 distinct locations
                if not count % 10000:
                    redis_db.zadd('ip_location:', ip_mapping)
                    redis_db.hmset('location:', location_mapping)
                    ip_mapping = {}
                    location_mapping = {}
                    print('Loaded ips: %s/%s' % (lineno, lines))
        # Flush the final pending run; guard against a completely empty
        # file (temp_location stays None) and against sending an empty
        # mapping to zadd(), which redis-py rejects.
        if temp_location is not None:
            ip_mapping['%s_%s' % (temp_location, count)] = temp_score
            location_mapping[temp_location] = temp_info
        if ip_mapping:
            redis_db.zadd('ip_location:', ip_mapping)
            redis_db.hmset('location:', location_mapping)
        print('Loaded ips: %s/%s' % (lineno, lines))
        print('All done!')
def update_token(token, user, item=None):
    """Record user activity for this session token.

    Args:
        token (str): session token
        user: user identifier the token belongs to
        item: optional item id the user just viewed
    """
    timestamp = time.time()
    # NOTE(review): the original source line was corrupted here
    # ("redis_db.hset('login:'******'recent:', token, timestamp)");
    # reconstructed as the standard token-tracking pair below — confirm
    # against the original.
    redis_db.hset('login:', token, user)           # token -> user mapping
    redis_db.zadd('recent:', {token: timestamp})   # most-recently-seen tokens
    if item:
        redis_db.zadd('viewed:' + token, {item: timestamp})
        # keep only the last 25 items this token viewed
        redis_db.zremrangebyrank('viewed:' + token, 0, -26)
        # Global view ranking: decrement by 1 so popular items climb
        # toward index 0. redis-py 3.x zincrby signature is
        # (name, amount, value) — the original positional order would
        # pass the item as the amount.
        redis_db.zincrby('viewed:', -1, item)
def cache_rows():
    """Daemon loop: keep scheduled inventory rows cached as JSON in redis.

    Polls the 'schedule:' zset for the next due row, refreshes its
    'inv:<row_id>' cache entry, and reschedules it by its 'delay:' score.
    Runs until the module-level QUIT flag is set.
    """
    while not QUIT:
        # 'entry' instead of 'next' — avoid shadowing the builtin.
        entry = redis_db.zrange('schedule:', 0, 0, withscores=True)
        now = time.time()
        if not entry or entry[0][1] > now:
            time.sleep(.05)  # nothing due yet; poll again shortly
            continue
        row_id = entry[0][0]
        delay = redis_db.zscore('delay:', row_id)
        # A missing (None) or non-positive delay means "stop caching this
        # row". The None check is required: `None <= 0` raises TypeError
        # on Python 3.
        if delay is None or delay <= 0:
            redis_db.zrem('delay:', row_id)
            redis_db.zrem('schedule:', row_id)
            redis_db.delete('inv:' + row_id)
            continue
        row = Inventory.get(row_id)
        # Mapping form is the redis-py 3.x zadd() signature used elsewhere
        # in this module.
        redis_db.zadd('schedule:', {row_id: now + delay})
        redis_db.set('inv:' + row_id, json.dumps(row.to_dict()))
def post_article(user, title, link):
    """Create an article, register the poster's vote, and index it.

    Args:
        user: id of the posting user
        title (str): article title
        link (str): article url

    Returns:
        str: the new article id
    """
    article_id = str(redis_db.incr('article:'))
    voted = 'voted:' + article_id
    # The poster automatically votes for their own article; the voted set
    # expires after a week, after which no further votes are tracked.
    redis_db.sadd(voted, user)
    redis_db.expire(voted, ONE_WEEK_IN_SECONDS)
    now = time.time()
    article = 'article:' + article_id
    redis_db.hmset(article, {
        'title': title,
        'link': link,
        'poster': user,
        'time': now,
        'votes': 1
    })
    # Mapping form is the redis-py 3.x zadd() signature used elsewhere in
    # this module; the old positional form breaks on redis-py 3+.
    redis_db.zadd('score:', {article: now + VOTE_SCORE})
    redis_db.zadd('time:', {article: now})
    return article_id
def redis_add_word(word) -> bool:
    """
    Adds words provided by user to redis zset if not already present
    Word is converted to lowercase, and added with a score of 0 to the zset
    Since all words are added with same score, the zset sorts them in lexicographic order
    All substrings of the word are extracted and added
    The complete word is added at the last with a '*' appended to it to distinguish
    it as the actual word added
    e.g for foo, entries added are f, fo, foo*
    ---
    parameters:
      - name: word
        type: string(anycase)
        required: true
        description: word to be parsed and added to redis zset
    returns:
      - bool:
          description: Indicates if word is added successfully or not
    """
    try:
        word = word.lower()
        # A completed word is stored with a trailing '*', so the duplicate
        # check must look for that member — the bare word only ever exists
        # as a prefix of some other word. 'is not None' is required because
        # zrank() returns 0 (falsy) for the zset's first member.
        if redis_db.zrank(app.config['REDIS_ZSET'], word + '*') is not None:
            app.logger.info(f'Word "{word}" already present in dictionary')
            return True
        # Add every proper prefix of the word (f, fo for "foo") ...
        for index in range(1, len(word)):
            redis_db.zadd(app.config['REDIS_ZSET'], {word[:index]: 0})
        # ... and the full word marked with '*' as a real dictionary entry.
        word += '*'
        redis_db.zadd(app.config['REDIS_ZSET'], {word: 0})
        app.logger.info(f'Added the word "{word[:-1]}" to dictionary')
        return True
    except ConnectionError as err:
        app.logger.error(f'Failed adding word "{word}", to dictionary. {err}')
        return False