def __init__(self, db=None, cache=None):
    """Bind this manager to a database handle and a redis cache.

    Falls back to the module-level ``motorclient.fbt`` database and the
    shared ``RedisCacheClient`` singleton when the arguments are not
    supplied (or are falsy).
    """
    self._db = db if db else motorclient.fbt
    self._cache = cache if cache else RedisCacheClient().get_instance()
def initialize(cls, sync_db, async_db):
    """Wire up the class-level resources.

    Stores the sync and async DB handles, grabs the shared redis client,
    creates a worker thread pool and starts the redis subscriber.
    Both DB handles are required.
    """
    assert sync_db is not None and async_db is not None
    cls._async_db = async_db
    cls._sync_db = sync_db
    cls._redis = RedisCacheClient().get_instance()
    # presumably sized for redis/DB-bound background work — TODO confirm
    pool_size = 10
    cls._executor = ThreadPoolExecutor(max_workers=pool_size)
    cls._init_subscribe()
def get_res_list(self):
    """Serve a paged, redis-cached list of online resources.

    Query arguments:
        page     -- non-negative page index (default 1).
        sort_by  -- must equal res_sort_by["online_num"]; no other order supported.
        res_type -- main resource-type index (default: index of "电影").
        version  -- client version string; >= "1.8" also gets a page count.

    On invalid arguments replies with errorHandle(self, 0) and returns.
    """
    # user = self.get_argument("user", None)
    page = self.get_argument("page", 1)
    sort_by = self.get_argument(
        "sort_by", ResourceStoreManager.res_sort_by["online_num"])
    res_type = self.get_argument("res_type",
                                 ResourceInfo.get_main_index_by_type("电影"))
    try:
        page = int(page)
        assert page >= 0
        sort_by = int(sort_by)
        assert sort_by == ResourceStoreManager.res_sort_by["online_num"]
        res_type = int(res_type)
        assert ResourceInfo.is_valid_main_type(res_type)
    except (ValueError, AssertionError):
        # Narrowed from a bare `except:` so unrelated failures (including
        # SystemExit/KeyboardInterrupt) are no longer reported as a
        # client-argument error.
        util.errorHandle(self, 0)
        self.finish()
        return
    cache_key = "fb:online:type:" + str(res_type) + ":page:" + str(page)
    redis_cache = RedisCacheClient().get_instance()
    RES_CNT_IN_A_PAGE = 20
    resource_list = redis_cache.get(cache_key)
    if resource_list:
        resource_list = json.loads(resource_list)
    else:
        # Cache miss: fetch one page from the DB and cache it briefly.
        resource_list = yield OnlineResources.get_online_resources_by_type(
            res_type, page, RES_CNT_IN_A_PAGE)
        redis_cache.set(cache_key, json.dumps(resource_list))
        ONE_MINUTES = 1 * 60
        redis_cache.expire(cache_key, ONE_MINUTES)
    # NOTE(review): lexicographic compare mis-orders e.g. "1.10" < "1.8";
    # confirm the client version scheme never reaches two-digit minors.
    if self.get_argument("version", "") >= "1.8":
        size = OnlineResources.get_online_resources_count(res_type)
        # Ceil-divide the total count into pages. `//` keeps integer
        # semantics identical under Python 2 and safe under Python 3.
        util.write(
            self, 1, "", {
                "size": (size + RES_CNT_IN_A_PAGE - 1) // RES_CNT_IN_A_PAGE,
                "res": resource_list
            })
    else:
        util.write(self, 1, "", resource_list)
    self.finish()
def get_res_list(self):
    """Serve a paged, redis-cached list of online resources.

    Query arguments:
        page     -- non-negative page index (default 1).
        sort_by  -- must equal res_sort_by["online_num"]; no other order supported.
        res_type -- main resource-type index (default: index of "电影").
        version  -- client version string; >= "1.8" also gets "size".

    On invalid arguments replies with errorHandle(self, 0) and returns.
    """
    # user = self.get_argument("user", None)
    page = self.get_argument("page", 1)
    sort_by = self.get_argument("sort_by",
                                ResourceStoreManager.res_sort_by["online_num"])
    res_type = self.get_argument("res_type",
                                 ResourceInfo.get_main_index_by_type("电影"))
    try:
        page = int(page)
        assert page >= 0
        sort_by = int(sort_by)
        assert sort_by == ResourceStoreManager.res_sort_by["online_num"]
        res_type = int(res_type)
        assert ResourceInfo.is_valid_main_type(res_type)
    except (ValueError, AssertionError):
        # Narrowed from a bare `except:` so unrelated failures (including
        # SystemExit/KeyboardInterrupt) are no longer reported as a
        # client-argument error.
        util.errorHandle(self, 0)
        self.finish()
        return
    cache_key = "fb:online:type:" + str(res_type) + ":page:" + str(page)
    redis_cache = RedisCacheClient().get_instance()
    RES_CNT_IN_A_PAGE = 20
    resource_list = redis_cache.get(cache_key)
    if resource_list:
        resource_list = json.loads(resource_list)
    else:
        # Cache miss: fetch one page from the DB and cache it briefly.
        resource_list = yield OnlineResources.get_online_resources_by_type(
            res_type, page, RES_CNT_IN_A_PAGE)
        redis_cache.set(cache_key, json.dumps(resource_list))
        ONE_MINUTES = 1 * 60
        redis_cache.expire(cache_key, ONE_MINUTES)
    # NOTE(review): lexicographic compare mis-orders e.g. "1.10" < "1.8";
    # confirm the client version scheme never reaches two-digit minors.
    if self.get_argument("version", "") >= "1.8":
        size = OnlineResources.get_online_resources_count(res_type)
        # NOTE(review): here "size" is the raw resource count, while the
        # sibling handler returns a page count (ceil(size / page_size)).
        # Confirm which contract this endpoint's clients expect.
        util.write(self, 1, "", {"size": size, "res": resource_list})
    else:
        util.write(self, 1, "", resource_list)
    self.finish()
def init(cls, async_db):
    """Store the (required) async DB handle and the shared redis client."""
    assert async_db is not None
    cls._redis = RedisCacheClient().get_instance()
    cls._async_db = async_db
class DownloadMedium(object):
    """Select online peers that can serve a resource to a downloading user.

    Resolution order: IPv6 peers first, then LAN peers sharing the
    requester's public IPv4; otherwise no peers are returned.
    NOTE: Python 2 semantics — `filter(...)` must return a list
    (`.count()`/`.remove()` are called on it).
    """
    # Result codes for "download_type" in the returned dicts.
    download_type = {
        "None": 0,
        "V4_LAN": 1,
        "V4_NAT": 2,
        "V6": 3,
        "V4_NOT_ALLOW": 4
    }
    # Shared redis client, bound once at class-definition time.
    _redis_cache = RedisCacheClient().get_instance()

    @classmethod
    def get_online_owners_num_of_res(cls, file_id):
        """Return the size of the redis set of online owners of file_id.

        Best-effort: a redis timeout is reported as 0 owners.
        """
        try:
            return cls._redis_cache.scard(CACHE_OWNER_OF_RESOURCE + file_id)
        except TimeoutError as e:
            return 0

    @classmethod
    def get_online_owners_of_res(cls, file_id):
        """Return the redis set of online owner uids of file_id."""
        return cls._redis_cache.smembers(CACHE_OWNER_OF_RESOURCE + file_id)

    @classmethod
    def get_matched_online_owners(cls,
                                  my_uid,
                                  res_users,
                                  allowV4Download=False,
                                  need_romove_self=False):
        """Match online owners against the requester's connectivity.

        res_users -- candidate owner uids; only online ones are considered.
        allowV4Download -- when False and the requester has IPv6, V4 paths
            are refused ("V4_NOT_ALLOW").
        need_romove_self -- drop my_uid from the candidates.

        Returns {"owners": [...], "download_type": <code>}.
        """
        assert my_uid > 0
        online_owners = filter(UserIPCache.user_online, res_users)
        if need_romove_self:
            if online_owners.count(my_uid):
                # remove myself
                online_owners.remove(my_uid)
        if len(online_owners):
            if (UserIPCache.user_online(my_uid)):
                shouldCheckV4 = True
                my_ipv6_addrs = HttpServerInfoCache.get_user_ipv6(my_uid)
                if isinstance(my_ipv6_addrs, set):
                    # Requester has IPv6: collect owners reachable over v6.
                    assert len(my_ipv6_addrs) > 0
                    v6_owners = []
                    for user in online_owners:
                        v6_addrs = HttpServerInfoCache.get_user_ipv6(user)
                        if isinstance(v6_addrs, set):
                            for addr in v6_addrs:
                                if isinstance(addr, Address):
                                    assert IP.is_valid_ipv6_address(
                                        addr.get_host())
                                    v6_owners.append({
                                        "uid": user,
                                        "host": addr.get_host(),
                                        "port": addr.get_port()
                                    })
                                else:
                                    # Bare address string: assume the
                                    # default v6 serving port (8886).
                                    assert IP.is_valid_ipv6_address(addr)
                                    v6_owners.append({
                                        "uid": user,
                                        "host": addr,
                                        "port": 8886
                                    })
                    if len(v6_owners):
                        return {
                            "owners": v6_owners,
                            "download_type": cls.download_type["V6"]
                        }
                    elif not allowV4Download:
                        # No v6 owners found and v4 fallback is disabled.
                        shouldCheckV4 = False
                if shouldCheckV4:
                    my_ipv4 = UserIPCache.get_user_ip(my_uid)
                    assert IP.is_valid_ipv4_address(my_ipv4)
                    # Owners behind the same public IPv4 => same LAN/NAT.
                    same_ip_users = filter(
                        lambda user: UserIPCache.get_user_ip(user) == my_ipv4,
                        online_owners)
                    if len(same_ip_users):
                        # LAN USER
                        v4_owners = []
                        for user in same_ip_users:
                            addr = HttpServerInfoCache.get_user_ipv4(user)
                            if addr is not None:
                                if isinstance(addr, Address):
                                    if IP.is_valid_ipv4_address(
                                            addr.get_host()):
                                        v4_owners.append(user)
                                else:
                                    if IP.is_valid_ipv4_address(addr):
                                        v4_owners.append(user)
                        grep_owners = []
                        for user in v4_owners:
                            addr = HttpServerInfoCache.get_user_ipv4(user)
                            if addr is not None:
                                if isinstance(addr, Address):
                                    grep_owners.append({
                                        "uid": user,
                                        "host": addr.get_host(),
                                        "port": addr.get_port()
                                    })
                                else:
                                    # Default v4 serving port (8884).
                                    grep_owners.append({
                                        "uid": user,
                                        "host": addr,
                                        "port": 8884
                                    })
                        return {
                            "owners": grep_owners,
                            "download_type": cls.download_type["V4_LAN"]
                        }
                    else:
                        # NAT USER
                        #V4 user cant download v6's resource
                        return {
                            "owners": [],
                            "download_type": cls.download_type["None"]
                        }
                        # TODO
                        # valid_v4_owners = [user for user in online_owners if
                        # IP.is_valid_ipv4_address(UserIPCache.get_user_ip(
                        # user))] #and not isinstance(HttpServerInfoCache.get_user_ipv6(user),set)]
                        # grep_owners = [{"uid": user, "host": UserIPCache.get_user_ip(user), "port": 8884} for user in
                        # valid_v4_owners] #CAUTION: if user has a external IP, he cant open local LAN server. SO there must use get_user_ip, but not get_user_ipv4
                        # return {"owners": grep_owners, "download_type": cls.download_type["V4_NAT"]}
                else:
                    return {
                        "owners": [],
                        "download_type": cls.download_type["V4_NOT_ALLOW"]
                    }
            else:
                # Requester itself is offline.
                return {
                    "owners": [],
                    "download_type": cls.download_type["None"]
                }
        else:
            # No owner of the resource is online.
            return {"owners": [], "download_type": cls.download_type["None"]}

    # TODO fix download user has ipv6, but node websocket has a bug. Server can't send msg to client socket.
    # TODO So I allow all user download through ipv6.
    # TODO add unit test for download
    @classmethod
    def get_matched_online_owners2(cls,
                                   my_uid,
                                   res_users,
                                   allowV4Download=False,
                                   need_romove_self=False):
        """Variant of get_matched_online_owners without the requester
        online/IPv6 pre-checks (see TODOs above): every online owner's
        IPv6 addresses are offered regardless of the requester's own
        connectivity.
        """
        assert my_uid > 0
        online_owners = filter(UserIPCache.user_online, res_users)
        if need_romove_self:
            if online_owners.count(my_uid):
                # remove myself
                online_owners.remove(my_uid)
        if len(online_owners):
            #if (UserIPCache.user_online(my_uid)):
            shouldCheckV4 = True
            # my_ipv6_addrs = HttpServerInfoCache.get_user_ipv6(my_uid)
            # if isinstance(my_ipv6_addrs, set):
            # assert len(my_ipv6_addrs) > 0
            v6_owners = []
            for user in online_owners:
                v6_addrs = HttpServerInfoCache.get_user_ipv6(user)
                if isinstance(v6_addrs, set):
                    for addr in v6_addrs:
                        if isinstance(addr, Address):
                            assert IP.is_valid_ipv6_address(addr.get_host())
                            v6_owners.append({
                                "uid": user,
                                "host": addr.get_host(),
                                "port": addr.get_port()
                            })
                        else:
                            # Bare address string: default v6 port (8886).
                            assert IP.is_valid_ipv6_address(addr)
                            v6_owners.append({
                                "uid": user,
                                "host": addr,
                                "port": 8886
                            })
            if len(v6_owners):
                return {
                    "owners": v6_owners,
                    "download_type": cls.download_type["V6"]
                }
            elif not allowV4Download:
                shouldCheckV4 = False
            if shouldCheckV4:
                my_ipv4 = UserIPCache.get_user_ip(my_uid)
                assert IP.is_valid_ipv4_address(my_ipv4)
                # Owners behind the same public IPv4 => same LAN/NAT.
                same_ip_users = filter(
                    lambda user: UserIPCache.get_user_ip(user) == my_ipv4,
                    online_owners)
                if len(same_ip_users):
                    # LAN USER
                    v4_owners = []
                    for user in same_ip_users:
                        addr = HttpServerInfoCache.get_user_ipv4(user)
                        if addr is not None:
                            if isinstance(addr, Address):
                                if IP.is_valid_ipv4_address(addr.get_host()):
                                    v4_owners.append(user)
                            else:
                                if IP.is_valid_ipv4_address(addr):
                                    v4_owners.append(user)
                    grep_owners = []
                    for user in v4_owners:
                        addr = HttpServerInfoCache.get_user_ipv4(user)
                        if addr is not None:
                            if isinstance(addr, Address):
                                grep_owners.append({
                                    "uid": user,
                                    "host": addr.get_host(),
                                    "port": addr.get_port()
                                })
                            else:
                                # Default v4 serving port (8884).
                                grep_owners.append({
                                    "uid": user,
                                    "host": addr,
                                    "port": 8884
                                })
                    return {
                        "owners": grep_owners,
                        "download_type": cls.download_type["V4_LAN"]
                    }
                else:
                    # NAT USER
                    #V4 user cant download v6's resource
                    return {
                        "owners": [],
                        "download_type": cls.download_type["None"]
                    }
                    # TODO
                    # valid_v4_owners = [user for user in online_owners if
                    # IP.is_valid_ipv4_address(UserIPCache.get_user_ip(
                    # user))] #and not isinstance(HttpServerInfoCache.get_user_ipv6(user),set)]
                    # grep_owners = [{"uid": user, "host": UserIPCache.get_user_ip(user), "port": 8884} for user in
                    # valid_v4_owners] #CAUTION: if user has a external IP, he cant open local LAN server. SO there must use get_user_ip, but not get_user_ipv4
                    # return {"owners": grep_owners, "download_type": cls.download_type["V4_NAT"]}
            else:
                return {
                    "owners": [],
                    "download_type": cls.download_type["V4_NOT_ALLOW"]
                }
            #else:
            #    return {"owners": [], "download_type": cls.download_type["None"]}
        else:
            # No owner of the resource is online.
            return {"owners": [], "download_type": cls.download_type["None"]}

    @classmethod
    def get_online_file_owner(cls,
                              my_uid,
                              file_id,
                              allowV4Download=False,
                              dir_id=None):
        """Look up online owners of a file (or its directory, when dir_id
        is given) and match them against my_uid's connectivity.

        Always removes my_uid from the candidate owners.
        """
        my_uid = long(my_uid)
        assert my_uid >= 0
        # A directory id, when present, takes precedence over the file id.
        if dir_id:
            fid = dir_id
        else:
            fid = file_id
        res_users = [long(uid) for uid in cls.get_online_owners_of_res(fid)]
        return cls.get_matched_online_owners2(my_uid, res_users,
                                              allowV4Download, True)
class FBRankManager(object):
    """Weekly/monthly FB-coin ranking backed by redis hashes.

    Each hash maps uid -> accumulated coin delta; rankings are computed
    on demand by sorting the whole hash. The pub/sub accumulators are
    currently disabled (see the commented-out processors below).
    """
    # _fb_vary_detector=None
    # _study_fb_vary_detector=None
    # _redis_cache = None
    _redis_cache = RedisCacheClient().get_instance()
    # Redis hash keys for the four leaderboards.
    _FB_WEEKLY_CACHE = FB_WEEKLY_CACHE
    _FB_MONTHLY_CACHE = FB_MONTHLY_CACHE
    _STUDY_FB_WEEKLY_CACHE = STUDY_FB_WEEKLY_CACHE
    _STUDY_FB_MONTHLY_CACHE = STUDY_FB_MONTHLY_CACHE

    @classmethod
    def set_cache(cls, cache):
        """Replace the redis client (used by tests / alternate setups)."""
        cls._redis_cache = cache

    # @classmethod
    # def fb_vary_processor(cls,msg):
    #     fb_vary=json.loads(msg)
    #     # if len(fb_vary)<100:
    #     #     print "warning msg too few..."
    #     # print "receive coin vary:",msg
    #     pipe = cls._redis_cache.pipeline()
    #     for uid,delta_fb in fb_vary.iteritems():
    #         # pipe.hsetnx(cls._FB_WEEKLY_CACHE,uid,0)
    #         # pipe.hsetnx(cls._FB_MONTHLY_CACHE,uid,0)
    #         pipe.hincrbyfloat(cls._FB_WEEKLY_CACHE,uid,delta_fb)
    #         pipe.hincrbyfloat(cls._FB_MONTHLY_CACHE,uid,delta_fb)
    #     pipe.execute()
    #
    # @classmethod
    # def study_fb_vary_processor(cls,msg):
    #     fb_vary=json.loads(msg)
    #     pipe = cls._redis_cache.pipeline()
    #     for uid,delta_fb in fb_vary.iteritems():
    #         # cls._redis_cache.hsetnx(cls._STUDY_FB_WEEKLY_CACHE,uid,0)
    #         # cls._redis_cache.hsetnx(cls._STUDY_FB_MONTHLY_CACHE,uid,0)
    #         pipe.hincrby(cls._STUDY_FB_WEEKLY_CACHE,uid,delta_fb)
    #         pipe.hincrby(cls._STUDY_FB_MONTHLY_CACHE,uid,delta_fb)
    #     pipe.execute()

    @classmethod
    def initialize(cls, need_clear_fb_cache=False):
        """No-op; the subscriber-based accumulation below is disabled."""
        pass
        # cls._fb_vary_detector=RedisSubscribeClient(cls.fb_vary_processor)
        # cls._study_fb_vary_detector=RedisSubscribeClient(cls.study_fb_vary_processor, CHANNEL_STUDY_COIN_VARY)
        # if need_clear_fb_cache:
        #     cls.reset_weekly_fb()
        #     cls.reset_monthly_fb()

    @classmethod
    def get_monthly_top(cls):
        """Top-100 monthly coin earners."""
        return cls._get_top_helper(False)

    @classmethod
    def get_weekly_top(cls):
        """Top-100 weekly coin earners."""
        return cls._get_top_helper(True)

    @classmethod
    def get_weekly_top2(cls):
        """Top-100 weekly earners on the 'study' leaderboard."""
        return cls._get_top_helper(True, True)

    @classmethod
    def get_monthly_top2(cls):
        """Top-100 monthly earners on the 'study' leaderboard."""
        return cls._get_top_helper(False, True)

    @classmethod
    def fb_vary_cmp(cls, x, y):
        """Python 2 cmp for (uid, coins) pairs: coins descending, then
        uid descending as the tie-breaker."""
        if x[1] == y[1]:
            return cmp(y[0], x[0])
        else:
            return cmp(y[1], x[1])

    @classmethod
    def sorted_fb_rank(cls, is_weekly, is_by_study=False):
        """Load the selected leaderboard hash and return a full sorted
        list of (uid, coins) pairs, dropping non-positive balances.
        """
        if is_by_study:
            if is_weekly:
                fb_var = cls._redis_cache.hgetall(cls._STUDY_FB_WEEKLY_CACHE)
            else:
                fb_var = cls._redis_cache.hgetall(cls._STUDY_FB_MONTHLY_CACHE)
        else:
            if is_weekly:
                fb_var = cls._redis_cache.hgetall(cls._FB_WEEKLY_CACHE)
            else:
                fb_var = cls._redis_cache.hgetall(cls._FB_MONTHLY_CACHE)
        # Values arrive as strings; int(float(...)) tolerates "3.0"-style
        # values written by hincrbyfloat.
        fb_var2 = [(int(uid), int(float(fb))) for uid, fb in fb_var.iteritems()
                   if int(float(fb)) > 0]
        sorted_fb = sorted(fb_var2, cmp=cls.fb_vary_cmp)
        return sorted_fb

    @classmethod
    def _get_top_helper(cls, is_weekly=True, is_by_study=False):
        """Return the first 100 entries of the requested ranking."""
        sorted_fb = cls.sorted_fb_rank(is_weekly, is_by_study)
        # return fb_var2.sort(cls.fb_vary_cmp)
        return sorted_fb[:100]

    @classmethod
    def reset_weekly_fb(cls):
        """Clear both weekly hashes (hdel requires at least one field)."""
        for k in (cls._FB_WEEKLY_CACHE, cls._STUDY_FB_WEEKLY_CACHE):
            keys = cls._redis_cache.hkeys(k)
            if len(keys) > 0:
                cls._redis_cache.hdel(k, *keys)

    @classmethod
    def reset_monthly_fb(cls):
        """Clear both monthly hashes (hdel requires at least one field)."""
        for k in (cls._FB_MONTHLY_CACHE, cls._STUDY_FB_MONTHLY_CACHE):
            keys = cls._redis_cache.hkeys(k)
            if len(keys) > 0:
                cls._redis_cache.hdel(k, *keys)

    @classmethod
    def finalize(cls):
        """No-op counterpart of initialize()."""
        pass
"""One-off maintenance script: clamp leaderboard entries whose cached
coin delta exceeds the user's actual total coins in Mongo."""
from redis_cache_client import RedisCacheClient
import mongoclient

db = mongoclient.fbt
cache = RedisCacheClient().get_instance()

_FB_WEEKLY_CACHE = "fb:cache:weekly"
_FB_MONTHLY_CACHE = "fb:cache:monthly"


def fix_rank(key):
    """Walk the ranking hash `key` and, for every ranked uid whose Mongo
    total_coins is lower than the cached value, overwrite the cached
    value with total_coins - 1.
    """
    fb_var = cache.hgetall(key)

    # Python 2 cmp: coins descending, uid descending on ties — mirrors
    # FBRankManager.fb_vary_cmp.
    def fb_vary_cmp(x, y):
        if x[1] == y[1]:
            return cmp(y[0], x[0])
        else:
            return cmp(y[1], x[1])

    # Drop non-positive balances; int(float(...)) tolerates "3.0" strings.
    fb_var2 = [(int(uid), int(float(fb))) for uid, fb in fb_var.iteritems()
               if int(float(fb)) > 0]
    sorted_fb = sorted(fb_var2, cmp=fb_vary_cmp)
    fb_user = sorted_fb  #[:1000]
    for uid, coin in fb_user:
        c = db.coins_of_user.find_one({"uid": uid}, {"total_coins": 1})
        if c and "total_coins" in c:
            if c["total_coins"] < coin:
                # Cached delta is impossibly large — clamp it.
                # NOTE(review): the -1 offset looks deliberate but is
                # undocumented; confirm intent.
                cache.hset(key, uid, int(c["total_coins"]) - 1)
                #print "hset ....."
class FBTUserResourceManager(object):
    """Per-user resource (file id) lists, stored in Mongo with a redis
    list as a read-through cache.

    Mongo stores file_ids append-ordered; the redis list keeps them
    newest-first (lpush on add, reversed on cache fill).
    """
    # Redis cache TTL in seconds (300 minutes).
    EXPIRE_TIME = 300 * 60

    def __init__(self, db=None, cache=None):
        # Fall back to the module-level motor DB and the shared redis
        # singleton when not injected.
        if db:
            self._db = db
        else:
            self._db = motorclient.fbt
        if cache:
            self._cache = cache
        else:
            self._cache = RedisCacheClient().get_instance()

    def key_of(self, uid):
        """Redis key holding the cached resource list of `uid`."""
        return USER_RES_CACHE_KEY + str(uid)

    @gen.coroutine
    def get_resource_of_user(self, uid):
        """Return the user's file ids, newest first.

        Serves from redis when cached; otherwise loads from Mongo,
        repopulates the redis list (with TTL) and returns the result.
        Returns [] for an unknown user.
        """
        uid = long(uid)
        key = self.key_of(uid)
        res_list = self._cache.lrange(key, 0, -1)
        if res_list:
            raise gen.Return(res_list)
        else:
            resource_of_user = yield self.get_collection().find_one(
                {'uid': uid}, {'file_ids': 1})
            if resource_of_user and 'file_ids' in resource_of_user:
                # Mongo stores oldest-first; reverse to newest-first.
                ret = resource_of_user["file_ids"][::-1]
                pipe = self._cache.pipeline()
                for file_id in ret:
                    pipe.rpush(key, file_id)
                pipe.expire(key, self.EXPIRE_TIME)
                pipe.execute()
                raise gen.Return(ret)
            else:
                raise gen.Return([])

    def get_resource_of_user2(self, uid, sync_db):
        """Synchronous twin of get_resource_of_user using `sync_db`."""
        uid = long(uid)
        key = self.key_of(uid)
        res_list = self._cache.lrange(key, 0, -1)
        if res_list:
            return res_list
        else:
            resource_of_user = sync_db.resources_of_user.find_one(
                {'uid': uid}, {'file_ids': 1})
            if resource_of_user and 'file_ids' in resource_of_user:
                # Mongo stores oldest-first; reverse to newest-first.
                ret = resource_of_user["file_ids"][::-1]
                pipe = self._cache.pipeline()
                for file_id in ret:
                    pipe.rpush(key, file_id)
                pipe.expire(key, self.EXPIRE_TIME)
                pipe.execute()
                return ret
            else:
                return []

    def get_collection(self):
        """The Mongo collection backing the per-user lists."""
        return self._db.resources_of_user

    @gen.coroutine
    def add_to_my_resource_list(self, uid, file_id):
        """Prepend file_id to the cache, then $push it in Mongo (upsert)."""
        uid = long(uid)
        key = self.key_of(uid)
        pipe = self._cache.pipeline()
        pipe.lpush(key, file_id)
        pipe.expire(key, self.EXPIRE_TIME)
        pipe.execute()
        yield self.get_collection().update({"uid": uid},
                                           {"$push": {"file_ids": file_id}},
                                           True)

    @gen.coroutine
    def add_file_in_dir_to_my_resource_list(self, uid, file_ids):
        """Add several file ids at once (cache lpush each, Mongo $each)."""
        # file_ids is a list of file_id
        assert (isinstance(file_ids, list))
        uid = long(uid)
        key = self.key_of(uid)
        pipe = self._cache.pipeline()
        for file_id in file_ids:
            pipe.lpush(key, file_id)
        # pipe.lpush(key, file_ids)
        pipe.expire(key, self.EXPIRE_TIME)
        pipe.execute()
        yield self.get_collection().update(
            {"uid": uid}, {"$push": {"file_ids": {"$each": file_ids}}}, True)

    @gen.coroutine
    def remove_from_my_resource_list(self, uid, file_id):
        """$pull file_id in Mongo first, then purge it from the cache.

        Returns the modified Mongo document (new=True).
        """
        uid = long(uid)
        modified_doc = yield self.get_collection().find_and_modify(
            {"uid": uid}, {"$pull": {"file_ids": file_id}}, new=True)
        key = self.key_of(uid)
        # count=0 removes every occurrence of file_id from the list.
        self._cache.lrem(key, 0, file_id)
        raise gen.Return(modified_doc)

    # Lazily-created default instance (see instance()).
    single_instance = None

    @classmethod
    def instance(cls):
        """Return the process-wide default manager, creating it on first
        use with the default db/cache."""
        if cls.single_instance is None:
            cls.single_instance = FBTUserResourceManager()
        return cls.single_instance

    def set_db_cache(self, db, cache):
        """Re-point this manager at a different db and cache."""
        self._db = db
        self._cache = cache