def add_history(self, id, commodity):
    """Prepend one browsing-history entry to the target user's list.

    :param id: redis key identifying the user's history list
    :param commodity: commodity record to store
    """
    with manager_redis(self.db) as redis:
        try:
            redis.lpush(id, commodity)
        except Exception as error:
            # best-effort write: log and swallow, history is non-critical
            consumer_logger.error(error)
def get_online_counts(self, key=None):
    """Return the stored number of currently-online users.

    :param key: optional redis key; falls back to ``'online'``
    :return: raw redis value (bytes) or ``None`` when unset
    """
    with manager_redis(self.db) as redis:
        return redis.get(key or 'online')
def sync_favorites_add_callback(self, sender, instance, user, queryset, **kwargs):
    """
    Signal callback: mirror a newly-added favourite into redis.

    Writes the collection into the user's zset (ordered by timestamp) and the
    serialized commodity fields into a per-collection hash, then refreshes both
    TTLs, all through one pipeline.

    :param sender: signal sender (Collection)
    :param instance: the Collection instance just created
    :param user: the currently logged-in user
    :param queryset: the commodity/category queryset the user favourited
    :param kwargs: extra signal arguments
    :return: whether the sync succeeded, bool
    """
    with manager_redis(self.db) as redis:
        user_pk = user.pk
        zset_key = self.zset_key_commodity(user_pk)
        # serialize the commodity queryset into python-native dicts
        serialized_commodity = serializers.serialize('python', queryset)
        pipe = redis.pipeline()  # pipeline to cut round trips
        if serialized_commodity:
            collection_pk = instance.pk  # pk of the Collection row
            fields = serialized_commodity[0].get('fields')
            # fold the commodity pk into its fields dict for storage
            fields.update({'pk': serialized_commodity[0].get('pk')})
            timestamp = time.time()
            # zset member = collection pk, score = insertion time
            pipe.zadd(zset_key, {collection_pk: timestamp})
            hash_key_store = self.hash_key_commodity(
                user_pk, collection_pk)
            pipe.hmset(
                hash_key_store,
                self.serializer_commodity_data(fields))  # deep-format to JSON-safe data
            pipe.expire(zset_key, 30)  # reset zset TTL to 30s
            pipe.expire(hash_key_store, 30)  # reset hash TTL to 30s
            pipe.execute()
def sync_favorites_delete_callback(self, sender, user, collection_pk=None, is_all=False, **kwargs):
    """
    Signal callback: mirror favourite deletion into redis.

    When ``is_all`` is set, drops the user's whole favourites zset plus every
    per-collection hash it referenced; otherwise removes one collection entry
    and its hash. All deletions go through one pipeline.

    :param sender: signal sender
    :param user: the owning user
    :param collection_pk: pk of the single collection to remove (ignored when is_all)
    :param is_all: delete everything for this user when True
    """
    with manager_redis(self.db) as redis:
        user_pk = user.pk
        zset_key = self.zset_key_commodity(user_pk)
        pipe = redis.pipeline()
        if is_all:
            # all collection pks, needed to locate each hash before the zset goes away
            collection_list = (value.decode()
                               for value in redis.zrange(zset_key, 0, -1)
                               )
            pipe.delete(zset_key)  # drop the whole favourites zset
            for pk in collection_list:
                hash_temp_key = self.hash_key_commodity(user_pk, pk)
                pipe.delete(hash_temp_key)
        else:
            hash_key = self.hash_key_commodity(user_pk, collection_pk)
            pipe.zrem(zset_key, collection_pk)
            pipe.delete(hash_key)  # drop that single commodity hash
        pipe.execute()
def delete_one_good(self, user_id, **kwargs):
    """
    Delete one cart item (both its counts and price entries) from redis.

    :param user_id: owner of the shopping cart
    :param kwargs: must contain ``store_id`` and ``good_id``
    :return: bool — True only when both zset entries were removed
    """
    with manager_redis(self.db) as redis:
        if 'store_id' not in kwargs or 'good_id' not in kwargs:
            return False
        try:
            store_id = int(kwargs['store_id'])
            first_key = self.key('Cart', user_id, 'store')
            store_list = [
                int(i.decode())
                for i in redis.lrange(first_key, 0, -1)
            ]  # O(n)
            if store_id not in store_list:
                # user has no such store in the cart — nothing to delete
                return False
            good_id = int(kwargs['good_id'])
            second_key = self.key('Cart', user_id, store_id, 'counts')
            third_key = self.key('Cart', user_id, store_id, 'price')
            # BUG FIX: the original used `zrem(...) and zrem(...)`, which
            # short-circuits — when the counts removal returned 0 the price
            # entry was never removed, leaving a stale price record.
            # Run both removals unconditionally, then combine.
            removed_counts = redis.zrem(second_key, good_id)  # O(1)
            removed_price = redis.zrem(third_key, good_id)  # O(1)
            return bool(removed_counts and removed_price)
        except Exception as e:
            consumer_logger.error(e)
            return False
def delete_search_all(self, sender, request, **kwargs):
    """
    Delete the user's entire search history.

    :param sender: identifies the user whose history key is dropped
    :param request: originating HTTP request (unused here, signal signature)
    """
    with manager_redis(self.DB, type(self)) as redis:
        # removed leftover debug `print(sender)` from the original
        redis.delete(self.user_key(sender))
def get_history(self, id, page):
    """Return one 10-item page of the user's browsing history plus the total count.

    :param id: redis key of the user's history list
    :param page: zero-based page number
    :return: (history_list, history_counts) or None on redis failure
    """
    with manager_redis(self.db) as redis:
        try:
            # BUG FIX: LRANGE's end index is inclusive — the original
            # (page*10, (page+1)*10) returned 11 items per page.
            history_list = redis.lrange(id, page * 10, (page + 1) * 10 - 1)
            history_counts = redis.llen(id)
            return history_list, history_counts
        except Exception as e:
            consumer_logger.error(e)
def heat_search(self, sender, request, key, **kwargs):
    """
    Return today's top-ten hot searches, highest score first.

    :param sender: signal sender (unused)
    :param request: originating request (unused)
    :param key: search keyword (unused here, kept for signal signature)
    :return: list of the ten hottest search terms for today
    """
    with manager_redis(self.DB, type(self)) as redis:
        date = datetime.datetime.today()
        # BUG FIX: ZREVRANGE end index is inclusive — (0, 10) returned
        # eleven entries; (0, 9) yields the documented top ten.
        result = redis.zrevrange(self.heat_key(date), 0, 9)
        return result
def retrieve_last_ten(self, sender, request, key, **kwargs):
    """Return one page of the user's most recent search records.

    Scores in the zset are timestamps, so descending score order is
    most-recent-first.

    :param kwargs: must carry ``page`` (zero-based) and ``count`` (page size)
    :return: list of the requested page of search terms
    """
    # the `with` scope lives until the function returns
    with manager_redis(self.DB, type(self)) as redis:
        page = kwargs.get('page')
        count = kwargs.get('count')
        # BUG FIX: ZREVRANGE's end index is inclusive — the original
        # (page*count, (page+1)*count) returned count+1 records per page.
        result = redis.zrevrange(self.user_key(sender),
                                 page * count,
                                 (page + 1) * count - 1)
        return result
def save_search(self, sender, request, key, **kwargs):
    """
    Record one search for the user (kept 30 days) and bump today's hot-search
    counter.

    :param sender: identifies the user
    :param request: originating request (unused)
    :param key: the search keyword
    """
    with manager_redis(self.DB, type(self)) as redis:
        # each user owns one sorted set of search terms scored by time
        with redis.pipeline() as pipe:
            user_zset_key = self.user_key(sender)
            pipe.zadd(user_zset_key, {key: self.score})
            # BUG FIX: the original expired the raw `sender` value instead of
            # the actual zset key, so the history never expired; and
            # 60*60*24*30 is 2_592_000 seconds (30 days), not 25_920_000.
            pipe.expire(user_zset_key, 2592000)
            # daily hot-search zset: create at 1 or increment by 1
            pipe.zincrby(self.heat_key(datetime.datetime.today()), 1, key)
            pipe.execute()
def record_user_browsing_times(self, sender, ip, **kwargs):
    """
    Increment the site-wide visit counter for the current day.

    :param sender: signal sender
    :param ip: visitor address (unused here, kept for the signal signature)
    :param kwargs: extra signal arguments
    """
    today = datetime.date.today()
    counter_key = self.key('browser-day', self.trans_date(today))
    with manager_redis(self.db) as redis:
        redis.incrby(counter_key, amount=1)
def check_code_retrieve(self, key):
    """Fetch a one-time credential and delete it in the same pipeline.

    :param key: redis key holding the credential
    :return: (True, value) when the credential existed, otherwise (False, None)
    """
    try:
        with manager_redis(self.db) as redis:
            pipe = redis.pipeline()
            pipe.get(key)
            pipe.delete(key)
            # BUG FIX: pipeline commands only yield their replies on
            # execute(); the original bound `identity = pipe.get(key)`,
            # i.e. the pipeline object itself, which is always truthy.
            identity, _ = pipe.execute()
    except Exception:
        return False, None
    if identity:  # credential existed
        return True, identity
    # BUG FIX: the original fell off the end here, returning bare None
    # instead of the documented two-tuple, breaking caller unpacking.
    return False, None
def record_buy_category(self, sender, category, date, **kwargs):
    """
    After a purchase, increment the day's sale counter for the commodity
    category. One zset per day, keyed by date, so daily/monthly/quarterly
    rollups can be derived later.

    :param sender: signal sender
    :param category: commodity category to credit
    :param date: NOTE(review): this parameter is immediately shadowed by
        ``datetime.date.today()`` below — confirm whether callers expect
        the passed-in date to be honoured
    :param kwargs: extra signal arguments
    :return: None
    """
    with manager_redis(self.db) as redis:
        date = datetime.date.today()  # today (shadows the parameter — see note)
        date_str = self.trans_date(date)
        zset_key = self.key('buy-category', date_str)
        redis.zincrby(zset_key, amount=1, value=category)  # starts at 1, ready for ranking
def record_user_recommendation(self, sender, category, instance, **kwargs):
    """
    Bump the per-user interest score for a commodity category.

    Each user owns a sorted set counting how often a category appears in
    their favourites, browsing footprints and purchases. The key lives for
    three days so the recommender is fed fresh data without hoarding memory.

    :param sender: signal sender
    :param category: commodity category
    :param instance: User instance
    :param kwargs: extra signal arguments
    :return: None
    """
    with manager_redis(self.db) as redis:
        interest_key = self.key('love-category', instance.pk)
        pipe = redis.pipeline()
        pipe.zincrby(interest_key, amount=1, value=category)  # starts from 1
        pipe.expire(interest_key, 259200)  # three-day TTL
        pipe.execute()
def edit_one_good(self, user_id, **kwargs):
    """
    Adjust one cart item's quantity and running price in redis.

    ``way == 'add'`` bumps the quantity by one and the price zset by one
    discounted unit price; ``way == 'minus'`` does the reverse.

    :param user_id: owner of the shopping cart
    :param kwargs: must contain ``store_id``, ``good_id`` and ``way``
        ('add' or 'minus')
    :return: bool — False on missing args, unknown store, or any error
    """
    with manager_redis(self.db) as redis:
        if 'store_id' not in kwargs or 'good_id' not in kwargs or 'way' not in kwargs:
            return False
        try:
            store_id = int(kwargs['store_id'])
            first_key = self.key('Cart', user_id, 'store')
            store_list = [
                int(i.decode())
                for i in redis.lrange(first_key, 0, -1)
            ]  # O(n)
            if store_id not in store_list:
                # user has no such store in the cart — refuse the edit
                return False
            else:
                good_id = int(kwargs['good_id'])
                # look up current unit price and discount from the DB
                signal_good = Commodity.commodity_.get(
                    pk=good_id)
                signal_good_price = signal_good.price
                signal_good_discounts = signal_good.discounts
                second_key = self.key('Cart', user_id, store_id, 'counts')
                third_key = self.key('Cart', user_id, store_id, 'price')
                if kwargs['way'] == 'add':
                    redis.zincrby(second_key, 1,
                                  good_id)  # +1 quantity, O(1)
                    redis.zincrby(third_key,
                                  float(signal_good_discounts *
                                        signal_good_price),
                                  good_id)  # add one discounted unit price, O(1)
                elif kwargs['way'] == 'minus':
                    redis.zincrby(second_key, -1, good_id)
                    redis.zincrby(
                        third_key,
                        -(float(
                            signal_good_discounts * signal_good_price)),
                        good_id)
                # NOTE(review): any other `way` value silently does nothing
                # but still returns True via the else clause below
        except Exception as e:
            consumer_logger.error(e)
            return False
        else:
            return True
def delete_foot_commodity_id(self, user_id, **kwargs):
    """
    Delete one or all of a user's browsing-footprint records.

    :param user_id: the user's id
    :param kwargs: request.data QueryDict; ``is_all`` truthy deletes the
        whole footprint, otherwise ``commodity_id`` names the single
        record to drop
    :return: bool — True when something was actually removed
    """
    with manager_redis(self.db) as redis:
        try:
            key = self.key('foot', user_id)
            # BUG FIX: the original branches were inverted — `is_all`
            # removed a single commodity while the default path wiped the
            # entire key, the opposite of the documented intent.
            if kwargs.get('is_all', None):
                delete_counts = redis.delete(key)  # drop every record
            else:
                commodity_id = kwargs.get('commodity_id')
                # remove just that commodity from the zset
                delete_counts = redis.zrem(key, commodity_id)
            return True if delete_counts else False
        except Exception as e:
            consumer_logger.error(e)
            return False
def get_foot_commodity_id_and_page(self, user_id, **kwargs):
    """
    Return one page of footprint commodity ids (newest first) mapped to
    their timestamp scores, for hitting the database afterwards.

    :param user_id: the user's id
    :param kwargs: request.data QueryDict with ``page`` (1-based, default 1)
        and ``page_size``
    :return: dict {commodity_id: score} or None on failure
    """
    with manager_redis(self.db) as redis:
        try:
            key = self.key('foot', user_id)
            page = kwargs.get('page', 1)
            count = kwargs.get('page_size')
            # BUG FIX: ZREVRANGE's end index is inclusive — the original
            # ((page-1)*count, page*count) returned page_size+1 rows.
            commodity_dict = {
                int(name): score
                for name, score in redis.zrevrange(
                    key,
                    (page - 1) * count,
                    page * count - 1,
                    withscores=True)
            }
            return commodity_dict
        except Exception as e:
            consumer_logger.error(e)
            return None
def add_foot_commodity_id(self, user_id, validated_data): """ 消费者浏览某个商品,添加足迹 :param validated_data: 验证后的数据 :param user_id:用户id :return:boolean """ # add_foot.apply_async(args=(pickle.dumps(self), user_id, validated_data)) # can't pickle _thread.lock objects with manager_redis(self.db) as redis: try: key = self.key('foot', user_id) timestamp = self.score # 毫秒级别的时间戳 commodity_id = validated_data['pk'] # pipe = self.redis.pipeline() # 添加管道,减少客户端和服务端之间的TCP包传输次数 redis.zadd(key, {commodity_id: timestamp}) # 分别表示用户id(加密),当前日期+时间戳(分数值),商品id # 每个用户最多缓存100条历史记录 if redis.zcard(key) >= 100: # 集合中key为键的数量 redis.zremrangebyrank(key, 0, 0) # 移除时间最早的那条记录 # pipe.execute() return True except Exception as e: consumer_logger.error(e) return False
def record_login_user_browsing_times(self, sender, instance, **kwargs):
    """
    Record login activity in two bitmaps:

    1. Daily logins — key per day, bit offset = user pk. A 24:00 scheduled
       task aggregates it to a plain key-value and frees the bitmap.
    2. Per-user monthly logins — key per (year, month, user), bit offset =
       day of month. Aggregated on the first of each month, then freed.

    :param sender: signal sender
    :param instance: User instance
    :param kwargs: extra signal arguments
    :return: None
    """
    date = datetime.date.today()  # today
    with manager_redis(self.db) as redis:
        pipe = redis.pipeline()
        date_str = self.trans_date(date)
        # daily bitmap: offset is the user's pk
        key = self.key('login-day', date_str)
        pipe.setbit(key, instance.pk, 1)
        year, month, day = self.trans_date_offset(date)
        # monthly per-user bitmap: offset is the day of month
        key = self.key('login', year, month, instance.pk)
        pipe.setbit(key, day, 1)  # bitmaps keep memory use minimal
        pipe.execute()
def set_order_expiration(self, pk):
    """Give a freshly-created order a 30-minute expiration marker.

    :param pk: user/order pk used to derive the order id key
    """
    with manager_redis(self.db) as redis:
        key = OrderCreateSerializer.generate_orderid(pk)
        # BUG FIX: the stated TTL is 30 minutes, which is 1800 seconds;
        # the original used 3000 (50 minutes).
        redis.setex(key, 1800, 1)
def get_shop_cart_id_and_page(self, user_id, **data):
    """
    Retrieve all stores in the user's shopping cart plus, per store, the
    commodity ids with their quantities and running prices.

    Storage layout: a list of store ids per user, plus two zsets per store
    ('counts' and 'price') keyed by commodity id.

    :param user_id: the user's id
    :param data: HttpRequest.GET data; ``page`` (1-based) and optional ``limit``
    :return: (store->commodity-ids dict, commodity->price dict,
              commodity->count dict, total page count);
              (None, None, None, 0) on failure
    """
    with manager_redis(self.db) as redis:
        store_and_commodity = {}   # store id -> list of commodity ids
        commodity_and_counts = {}  # commodity id -> quantity
        commodity_and_price = {}   # commodity id -> quantity * unit price
        try:
            # BUG FIX: the original read the page size from data['page']
            # ('limit = 3 if "limit" not in data else int(data.get("page")[0])')
            limit = 3 if 'limit' not in data else int(data.get('limit')[0])
            page = int(data.get('page')[0])
            start = (page - 1) * limit
            # BUG FIX: LRANGE's end index is inclusive — page*limit fetched
            # one extra store per page.
            end = page * limit - 1
            # list of store ids in this user's cart
            first_key = self.key('Cart', user_id, 'store')
            store_counts = redis.llen(first_key)  # total stores, O(1)
            store_value_decode = [
                int(i.decode())
                for i in redis.lrange(first_key, start, end)
            ]  # decode store ids, O(n)
            for store in store_value_decode:
                commodity_id = []  # commodity ids under this store
                # per-store zsets: quantities and running prices per commodity
                second_key = self.key('Cart', user_id, store, 'counts')
                third_key = self.key('Cart', user_id, store, 'price')
                # decode every commodity id in each zset;
                # O(log n + m) per call, scores are floats
                # TODO: refactor with withscores=True and a pipeline to cut latency
                counts_list = [
                    int(value.decode())
                    for value in redis.zrevrange(second_key, 0, -1)
                ]
                price_list = [
                    int(value.decode())
                    for value in redis.zrevrange(third_key, 0, -1)
                ]
                for value in counts_list:
                    commodity_and_counts[value] = float(
                        redis.zscore(second_key, value))
                for value in price_list:
                    commodity_id.append(value)
                    commodity_and_price[value] = float(
                        redis.zscore(third_key, value))
                store_and_commodity.setdefault(store, commodity_id)
            common_logger.info(store_and_commodity)
            page = math.ceil(store_counts / limit)  # total number of pages
            return store_and_commodity, commodity_and_price, commodity_and_counts, page
        except Exception as e:
            consumer_logger.error(e)
            # BUG FIX: the original returned a 3-tuple here while the
            # success path returns 4 values, breaking caller unpacking.
            return None, None, None, 0
def get_resultSet(self, user, page, page_size, **kwargs):
    """
    Return one page of the user's favourites, preferring the redis cache.

    Cache layout: one zset per user ordering collection pks by time, plus
    one hash per collection holding the serialized commodity. An empty DB
    result is cached as member ``0`` to block cache penetration.

    :param user: the requesting User
    :param page: 1-based page number
    :param page_size: rows per page
    :return: 'null' when the cached-empty marker is hit; a list of
             deserialized commodities on cache hit; otherwise the DB
             queryset (which is also written back to the cache)
    """
    with manager_redis(self.db) as redis:
        user_pk = user.pk
        zset_key = self.zset_key_commodity(user_pk)
        # BUG FIX: ZREVRANGE's end index is inclusive; page*page_size - 1
        # matches the page_size-row DB slice below (the original fetched
        # one extra row).
        collection_dict = {
            int(key.decode()): value
            for key, value in redis.zrevrange(
                zset_key,
                (page - 1) * page_size,
                page * page_size - 1,
                withscores=True)
        }
        if 0 in collection_dict:
            # 0 is the cached marker for "database had nothing"
            return 'null'
        if collection_dict:
            # cache hit: read each collection's commodity hash
            list_resultSet = []
            for key in collection_dict:
                hash_key = self.hash_key_commodity(user_pk, key)
                # BUG FIX: hscan returns a (cursor, dict) pair. The
                # original looped `while offset != 0` without ever
                # advancing the cursor (infinite loop) and fed the whole
                # tuple to dict.update (TypeError).
                cursor, hresult = redis.hscan(hash_key)
                while cursor != 0:
                    cursor, chunk = redis.hscan(hash_key, cursor=cursor)
                    hresult.update(chunk)
                list_resultSet.append(hresult)
            # deserialize bytes -> the response data format
            list_result_format = self.deserializer_commodity_data(
                list_resultSet)
            return list_result_format
        # cache miss: hit the database for this page
        queryset = Collection.collection_.select_related(
            'commodity').filter(user=user)[(page - 1) * page_size:page * page_size]
        pipe_two = redis.pipeline()
        if queryset:
            commodity_list = [query.commodity for query in queryset]
            serializer_commodity = serializers.serialize(
                'python', commodity_list)
            # zip pairs each serialized commodity with its collection row
            for serializer, query in zip(serializer_commodity, queryset):
                commodity_pk = serializer.get('pk')
                commodity_fields = serializer.get('fields')
                commodity_fields.update({'pk': commodity_pk})
                collection_pk = query.pk
                pipe_two.zadd(zset_key, {collection_pk: time.time()})
                pipe_two.expire(zset_key, 30)  # 30s TTL on the zset
                hash_value_commodity = self.hash_key_commodity(
                    user_pk, collection_pk)
                pipe_two.hmset(hash_value_commodity,
                               self.serializer_commodity_data(
                                   commodity_fields))
                pipe_two.expire(hash_value_commodity, 30)
        else:
            pipe_two.zadd(zset_key, {0: 0})  # cache the empty result
            pipe_two.expire(zset_key, 6)     # short 6s TTL for the marker
        pipe_two.execute()
        return queryset
def add_count(self, key):
    """Increment the login counter by one.

    :param key: NOTE(review): this parameter is ignored — it is immediately
        overwritten by ``self.get_key`` below; confirm whether callers
        expect their key to be honoured
    """
    with manager_redis(self.db) as redis:
        key = self.get_key  # shadows the parameter (see note)
        redis.incr(key, 1)
def get_visit_counts(self, key):
    """Return today's stored visitor count.

    :param key: redis key holding the counter
    :return: raw redis value (bytes) or ``None`` when unset
    """
    with manager_redis(self.db) as redis:
        return redis.get(key)
def delete_search_single(self, sender, request, key, **kwargs):
    """Remove a single keyword from the user's search history.

    :param sender: identifies the user whose history is edited
    :param request: originating request (unused)
    :param key: the search keyword to drop
    """
    with manager_redis(self.DB, type(self)) as redis:
        history_key = self.user_key(sender)
        redis.zrem(history_key, key)