def getFacebook(accessToken, path, params=None):
    if params is None:
        params = {}

    num_retries = 0
    max_retries = 5

    params['access_token'] = accessToken

    while True:
        try:
            baseurl = 'https://graph.facebook.com'
            encoded_params = urllib.urlencode(params)
            url = "%s%s?%s" % (baseurl, path, encoded_params)
            result = json.load(urllib2.urlopen(url))

            if 'error' in result:
                if 'type' in result['error'] and result['error']['type'] == 'OAuthException':
                    # OAuth exception (assumed error types, mirroring the handlers below)
                    raise StampedInputError('Facebook OAuth error: %s' % result['error'])
                raise StampedUnavailableError('Facebook API error: %s' % result['error'])

            return result
        except urllib2.HTTPError as e:
            logs.warning('Facebook API Error: %s' % e)
            num_retries += 1
            if num_retries > max_retries:
                if e.code == 400:
                    raise StampedInputError('Facebook API 400 Error')
                raise StampedUnavailableError('Facebook API Error')
            logs.info("Retrying (%s)" % (num_retries))
            time.sleep(0.5)
        except Exception as e:
            raise Exception('Error connecting to Facebook: %s' % e)
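A minimal usage sketch for the helper above. The access token is a placeholder, and the Graph API paths are just illustrative endpoints.

# Hypothetical call: fetch the authenticated user's profile, then their friends.
# ACCESS_TOKEN is a placeholder, not a real credential.
ACCESS_TOKEN = 'user-access-token'
profile = getFacebook(ACCESS_TOKEN, '/me')
friends = getFacebook(ACCESS_TOKEN, '/me/friends', params={'limit': 25})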
def _try_set_cache(self, key, value, ttl=600):
    try:
        self._cache.set(key, value, time=ttl)
    except Exception as e:
        logs.warning("Unable to set cache: %s" % e)
    return value
def _api_keys():
    """
    Singleton for api-key definitions.

    On first load, opens keys/apikeys-<stack>.conf to load stack-specific api keys
    """
    global __api_keys, __stack_name, __keys_dir

    if __api_keys is not None:
        return __api_keys

    filename = 'apikeys-%s.conf' % __stack_name
    apikeys_path = '%s/%s' % (__keys_dir, filename)
    print('apikeys_path: %s' % apikeys_path)

    try:
        meta = {}
        if os.path.exists(apikeys_path):
            with open(apikeys_path, "rb") as fp:
                source = fp.read()
                exec compile(source, apikeys_path, "exec") in meta
        else:
            logs.warning("### Could not find '%s': no limits defined" % apikeys_path)
            return
    except Exception as e:
        logs.warning("Exception while trying to execute '%s' file: %s" % (filename, e))
        return

    __api_keys = meta['keys']
    return __api_keys
def _setCachedUserMini(self, user):
    key = str("obj::usermini::%s" % user.user_id)
    ttl = 60 * 10  # 10 minutes
    try:
        self._cache.set(key, user, time=ttl)
    except Exception as e:
        logs.warning("Unable to set cache for %s: %s" % (user.user_id, e))
def remove_activity_links(self, activity_ids):
    try:
        self._collection.remove({'activity_id': {'$in': activity_ids}})
        return True
    except Exception as e:
        logs.warning("Cannot remove documents: %s" % e)
        raise
def remove_activity_link_for_user(self, activity_id, user_id):
    try:
        # Both conditions belong in a single spec document; a second positional
        # dict is not a valid filter for pymongo's remove()
        self._collection.remove({'activity_id': activity_id, 'user_id': user_id})
        return True
    except Exception as e:
        logs.warning("Cannot remove document: %s" % e)
        raise
def init(self, binary=False, behaviors=None):
    try:
        memcached_nodes = []

        if utils.is_ec2():
            stack = ec2_utils.get_stack()

            for node in stack.nodes:
                if 'mem' in node.roles:
                    memcached_nodes.append(node.private_ip_address)

            if 0 == len(memcached_nodes):
                raise Exception("[%s] unable to find any memcached servers" % self)
        else:
            # running locally so default to localhost
            memcached_nodes.append('127.0.0.1')

        self._client = pylibmc.Client(memcached_nodes, binary=binary, behaviors=behaviors)

        # Verify it works
        self._client.set('test', 'test', time=0)
    except Exception as e:
        logs.warning("[%s] unable to initialize memcached (%s)" % (self, e))
        self._client = None
        return False
def _verify_new_library_and_get_items(self, create_if_not_found=False):
    # Check if the new library exists in Plex
    try:
        new_library = self.plex.server.library.section(
            self.recipe['new_library']['name'])
        logs.warning(u"Library already exists in Plex. Scanning the library...")
        new_library.update()
    except plexapi.exceptions.NotFound:
        if create_if_not_found:
            self.plex.create_new_library(
                self.recipe['new_library']['name'],
                self.recipe['new_library']['folder'],
                self.library_type)
            new_library = self.plex.server.library.section(
                self.recipe['new_library']['name'])
        else:
            raise Exception("Library '{library}' does not exist".format(
                library=self.recipe['new_library']['name']))

    # Wait for metadata to finish downloading before continuing
    logs.info(u"Waiting for metadata to finish downloading...")
    new_library = self.plex.server.library.section(
        self.recipe['new_library']['name'])
    while new_library.refreshing:
        time.sleep(5)
        new_library = self.plex.server.library.section(
            self.recipe['new_library']['name'])

    # Retrieve a list of items from the new library
    logs.info(u"Retrieving a list of items from the '{library}' library in "
              u"Plex...".format(library=self.recipe['new_library']['name']))
    return new_library, new_library.all()
def fail(self, request, response, content):
    if self.fail_limit is None or self.fail_period is None or self.blackout_wait is None:
        return

    now = time.time()

    ### Was getting deque() corruption error when the server was loaded with requests.
    ### This is to help prevent that.
    self.__semaphore.acquire()

    self.__fails.append(self.FailLog(request.url, request.body, request.headers,
                                     response.status, content))

    cutoff = now - self.fail_period
    count = 0

    # Drop fail-log entries that are older than the fail period
    while len(self.__fails) > 0:
        if self.__fails[0].timestamp < cutoff:
            self.__fails.popleft()
        else:
            break
    count = len(self.__fails)

    self.__semaphore.release()

    if count > self.fail_limit:
        logs.warning("hit fail limit for service '%s'" % self.__service_name)
        self.blackout_start = time.time()

        # Email dev if a fail limit was reached
        if is_ec2():
            self.sendFailLogEmail()
def work(self):
    self.__processes = []
    sleep_time = 5 * 60
    last_sleep = time.time()

    while not self.__shutdown:
        if len(self.__processes) == self.__count:
            p = self.__processes.pop(0)
            p.terminate()
            exitstatus = p.wait()
            if exitstatus != -15:
                print 'Problem!!!! %s' % exitstatus

            for p in self.__processes:
                returnstatus = p.poll()
                if returnstatus is not None:
                    if returnstatus == 0 and self.__shutdown:
                        # ignore
                        pass
                    else:
                        logs.warning('Process exited with code %s' % returnstatus)

            sleep_until = last_sleep + sleep_time
            while True:
                sleep_remaining = sleep_until - time.time()
                if sleep_remaining > 0:
                    time.sleep(min(sleep_remaining, 2))
                else:
                    break
            last_sleep = time.time()
        else:
            print 'adding process %i' % len(self.__processes)
            self.addProcess()

    for p in self.__processes:
        print 'killing process %i' % len(self.__processes)
        p.terminate()

    for p in self.__processes:
        p.wait()
def getFriendsOfFriends(self, userId, distance=2, inclusive=True):
    if distance <= 0:
        logs.warning('Invalid distance for friends of friends: %s' % distance)
        raise Exception('Invalid distance for friends of friends: %s' % distance)

    friends = {0: set([userId])}
    maxDistance = distance

    def visitUser(userId, distance):
        friendIds = self.friends_collection.getFriends(userId)
        if distance not in friends:
            friends[distance] = set()
        for friendId in friendIds:
            friends[distance].add(friendId)
            if distance < maxDistance:
                visitUser(friendId, distance + 1)

    visitUser(userId, 1)

    result = set([])
    if distance in friends:
        result = friends[distance]

    if not inclusive:
        prevDistance = distance - 1
        while prevDistance >= 0:
            if prevDistance in friends:
                result = result.difference(friends[prevDistance])
            prevDistance = prevDistance - 1

    return list(result)
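To illustrate the layer bookkeeping above, here is a self-contained sketch (plain dicts and sets, no database) of how the exact-distance result is derived when inclusive is False; the sample user ids are made up.

# friends[d] holds every user reachable in d hops (duplicates across layers allowed)
friends = {0: {'me'}, 1: {'alice', 'bob'}, 2: {'bob', 'carol', 'dave'}}
distance = 2
result = set(friends.get(distance, set()))
# inclusive=False: strip anyone already reachable at a shorter distance
for prev in range(distance - 1, -1, -1):
    result -= friends.get(prev, set())
print(sorted(result))  # ['carol', 'dave'] -- 'bob' is dropped because he is a direct friend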
def _setCachedComment(self, comment):
    key = str("obj::comment::%s" % comment.comment_id)
    cacheLength = 60 * 10  # 10 minutes
    try:
        self._cache.set(key, comment, time=cacheLength)
    except Exception as e:
        logs.warning("Unable to set cache for %s: %s" % (comment.comment_id, e))
def getHeadRequest(url, maxDelay=2):
    """
    Robust HEAD request to ensure that the requested resource exists.

    Returns the response object if the resource is accessible or None otherwise.
    """
    request = HeadRequest(url)
    delay = 0.5

    while True:
        try:
            return urllib2.urlopen(request)
        except urllib2.HTTPError as e:
            if e.code == 404:
                # Not found, return immediately
                return None
            elif e.code == 403:
                # Amazon returns 403s periodically -- worth another shot!
                pass
            elif e.code >= 400 and e.code < 500:
                # reraise the exception if the request resulted in any other 4xx error code,
                # since it was a problem with the url / headers and retrying most likely won't
                # solve the problem.
                logs.warning("Head request %s: (%s)" % (e.code, e))
                return None
        except (ValueError, IOError, httplib.BadStatusLine) as e:
            pass

        # Assumed retry policy: sleep with exponential backoff and give up once
        # the delay exceeds maxDelay
        if delay > maxDelay:
            return None
        time.sleep(delay)
        delay *= 2
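A usage sketch, assuming HeadRequest and the helper above live in the same module; the image URL is a placeholder.

# Check whether a remote resource exists before linking to it (placeholder URL)
response = getHeadRequest('http://example.com/images/profile.jpg')
exists = response is not None
if not exists:
    logs.warning('image missing or unreachable')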
def _add_id(self, guid, item):
    try:
        source, id_ = guid.split('://', 1)
    except ValueError:
        logs.warning(f"Unknown guid: {guid}")
        return
    id_ = id_.split('?')[0]
    if 'imdb' in source:
        if '/' in id_:
            id_ = id_.split('/')[-2]
        if self.matching_only and id_ not in self.match_imdb:
            return
        self.imdb[id_] = item
    elif 'tmdb' in source or 'themoviedb' in source:
        if self.matching_only and id_ not in self.match_tmdb:
            return
        self.tmdb[id_] = item
    elif 'tvdb' in source or 'thetvdb' in source:
        if '/' in id_:
            id_ = id_.split('/')[-2]
        if self.matching_only and id_ not in self.match_tvdb:
            return
        self.tvdb[id_] = item
    else:
        logs.warning(f"Unknown guid: {guid}. "
                     f"Possibly unmatched: {item.title} ({item.year})")
    self.items.add(item)
def save(self, to_save, manipulate=True, safe=False, **kwargs):
    if self._debug:
        print("Mongo 'save' - manipulate: %s safe: %s kwargs: %s" % (manipulate, safe, kwargs))

    num_retries = 0
    max_retries = 5
    storeLog = kwargs.pop('log', True)

    while True:
        try:
            ret = self._collection.save(to_save, manipulate, safe, **kwargs)
            return ret
        except AutoReconnect as e:
            num_retries += 1
            if num_retries > max_retries:
                msg = "Unable to connect after %d retries (%s)" % \
                      (max_retries, self._parent.__class__.__name__)
                if storeLog:
                    logs.warning(msg)
                raise
            if storeLog:
                logs.info("Retrying save (%s)" % (self._parent.__class__.__name__))
            time.sleep(0.25)
        except Exception as e:
            import traceback
            logs.warning('Failure updating document:\n%s' % ''.join(traceback.format_exc()))
            raise StampedSaveDocumentError("Unable to update document")
def setBasicGroup(source, target, oldName, newName=None, oldSuffix=None, newSuffix=None,
                  additionalSuffixes=None, seed=True):
    if newName is None:
        newName = oldName

    if oldSuffix is None:
        item = source.pop(oldName, None)
    else:
        item = source.pop('%s_%s' % (oldName, oldSuffix), None)

    if item is not None:
        # Manual conversions...
        if oldName == 'track_length':
            try:
                item = int(str(item).split('.')[0])
            except Exception as e:
                logs.warning("Unable to set length (%s) as integer: %s" % (item, e))

        if newSuffix is None:
            setattr(target, newName, item)
        else:
            setattr(target, '%s_%s' % (newName, newSuffix), item)

        sourceName = 'format'
        if seed:
            sourceName = 'seed'

        if newName != 'tombstone':
            setattr(target, '%s_source' % newName, source.pop('%s_source' % oldName, sourceName))
            setattr(target, '%s_timestamp' % newName, source.pop('%s_timestamp' % oldName, seedTimestamp))

        if additionalSuffixes is not None:
            for s in additionalSuffixes:
                t = source.pop('%s_%s' % (oldName, s), None)
                if t is not None:
                    setattr(target, '%s_%s' % (newName, s), t)
def getFriendData(self, user_token, user_secret, offset=0, limit=30):
    logs.info('### user_token %s user_secret: %s' % (user_token, user_secret))
    if limit > 100:
        raise StampedInputError("Limit must be <= 100")

    ids = self._getUserIds(user_token, user_secret, 'friends')
    if offset >= len(ids):
        return []

    url = '1/users/lookup.json'
    friends = []

    idset = ','.join(ids[offset:offset + limit])
    results = self.__get(url, user_token, user_secret, user_id=idset)

    for result in results:
        try:
            friends.append({
                'user_id': result['id'],
                'name': result['name'],
                'screen_name': result['screen_name'],
                'image_url': result['profile_image_url'],
            })
        except TypeError as e:
            logs.warning("Unable to get twitter friends! Error: %s" % e)
            logs.info("Results: %s" % results)
            raise

    return friends
def __init__(self, msg=None, desc=None):
    Exception.__init__(self, msg)
    self.msg = msg
    self.desc = desc
    if msg is not None:
        logs.warning(msg)
def __init__(self, msg=None, desc=None):
    KeyError.__init__(self, msg)
    self.msg = msg
    self.desc = desc
    if msg is not None:
        logs.warning(msg)
def _get_trakt_lists(self):
    item_list = []  # TODO Replace with dict, scrap item_ids?
    item_ids = []

    for url in self.recipe['source_list_urls']:
        max_age = (self.recipe['new_playlist'].get('max_age', 0)
                   if self.use_playlists
                   else self.recipe['new_library'].get('max_age', 0))
        if 'api.trakt.tv' in url:
            (item_list, item_ids) = self.trakt.add_items(
                self.library_type, url, item_list, item_ids, max_age or 0)
        elif 'imdb.com/chart' in url:
            (item_list, item_ids) = self.imdb.add_items(
                self.library_type, url, item_list, item_ids, max_age or 0)
        else:
            raise Exception("Unsupported source list: {url}".format(url=url))

    if self.recipe['weighted_sorting']['enabled']:
        if self.config['tmdb']['api_key']:
            logs.info(u"Getting data from TMDb to add weighted sorting...")
            item_list = self.weighted_sorting(item_list)
        else:
            logs.warning(u"Warning: TMDb API key is required "
                         u"for weighted sorting")

    return item_list, item_ids
def find(self, spec=None, output=None, limit=None, **kwargs):
    if self._debug:
        print("Mongo 'find' - spec: %s output: %s limit: %s kwargs: %s" % (spec, output, limit, kwargs))

    num_retries = 0
    max_retries = 5

    while True:
        try:
            ret = self._collection.find(spec, **kwargs)

            if limit is not None:
                ret = ret.limit(limit)

            if output is not None:
                if output == list:
                    ret = list(ret)

            return ret
        except AutoReconnect as e:
            num_retries += 1
            if num_retries > max_retries:
                msg = "Unable to connect after %d retries (%s)" % \
                      (max_retries, self._parent.__class__.__name__)
                logs.warning(msg)
                raise
            logs.info("Retrying find (%s)" % (self._parent.__class__.__name__))
            time.sleep(0.25)
def entityProxyFromKey(self, key, **kwargs):
    try:
        return TMDBMovie(key)
    except KeyError:
        logs.warning('Unable to find TMDB item for key: %s' % key)
        raise
    return None
def wrapper(*args, **kwds):
    key = args
    if kwds:
        key += (kwd_mark,) + tuple(sorted(kwds.items()))
    use_count[key] += 1

    # get cache entry or compute if not found
    try:
        result = cache[key]
        wrapper.hits += 1
    except KeyError:
        result = user_function(*args, **kwds)
        cache[key] = result
        wrapper.misses += 1

        # purge least frequently used cache entry
        if len(cache) > maxsize:
            for key, _ in nsmallest(maxsize // 10, use_count.iteritems(), key=itemgetter(1)):
                del cache[key], use_count[key]

    # Return a deep copy so callers can't mutate the cached value in place
    try:
        result = copy.deepcopy(result)
    except Exception:
        logs.warning("@lru_cache'd function %s returned a result that is not deep-copyable!"
                     % user_function.__name__)

    return result
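A usage sketch, assuming the closure above is returned by a decorator factory exposed as lru_cache(maxsize=...) in the same module; the decorated function and its lookup are illustrative placeholders.

# Hypothetical decorated function; repeated calls with the same key hit the cache,
# and each caller gets a deep copy of the cached value.
@lru_cache(maxsize=100)
def fetch_entity(entity_id):
    return expensive_lookup(entity_id)   # placeholder for the real lookup

first = fetch_entity('abc123')
second = fetch_entity('abc123')          # second call is served from the cache (wrapper.hits increments)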
def validateViewport(string):
    # Structure: "lat0,lng0,lat1,lng1"
    if string is None or string == '':
        return None
    try:
        coords = string.split(',')
        assert len(coords) == 4

        lat0 = float(coords[0])
        lng0 = float(coords[1])
        lat1 = float(coords[2])
        lng1 = float(coords[3])

        # Latitudes between -90 and 90
        assert lat0 >= -90.0 and lat0 <= 90.0
        assert lat1 >= -90.0 and lat1 <= 90.0

        # Longitudes between -180 and 180
        assert lng0 >= -180.0 and lng0 <= 180.0
        assert lng1 >= -180.0 and lng1 <= 180.0

        return string
    except Exception as e:
        logs.warning("Viewport check failed: %s" % string)
        raise StampedInputError("Invalid viewport: %s" % string)
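A short sketch of what the validator accepts and rejects; the coordinates are arbitrary examples.

# "lat0,lng0,lat1,lng1" with latitudes in [-90, 90] and longitudes in [-180, 180]
validateViewport('40.78,-73.97,40.71,-74.01')   # returns the string unchanged
validateViewport(None)                          # returns None (nothing to validate)
validateViewport('91.0,-73.97,40.71,-74.01')    # raises StampedInputError (latitude out of range)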
def _setCachedStat(self, stat):
    key = str("obj::stampstat::%s" % stat.stamp_id)
    cacheLength = 60 * 60  # 1 hour
    try:
        self._cache.set(key, stat, time=cacheLength)
    except Exception as e:
        logs.warning("Unable to set cache for %s: %s" % (stat.stamp_id, e))
def _setCachedToken(self, token):
    key = str("obj::accesstoken::%s" % token.token_id)
    cacheLength = 60 * 60  # One hour
    try:
        self._cache.set(key, token, time=cacheLength)
    except Exception as e:
        logs.warning("Unable to set cache for %s: %s" % (token.token_id, e))
def sendEmails(self, noop=False):
    logs.info("Submitting emails to %s users" % len(self._emailQueue))

    # Apply rate limit
    limit = 8

    ses = boto.connect_ses(keys.aws.AWS_ACCESS_KEY_ID, keys.aws.AWS_SECRET_KEY)

    for emailAddress, emailQueue in self._emailQueue.iteritems():
        if IS_PROD or emailAddress in self._adminEmails:
            count = 0
            emailQueue.reverse()

            for email in emailQueue:
                count += 1
                if count > limit:
                    logs.debug("Limit exceeded for email '%s'" % emailAddress)
                    break

                try:
                    logs.debug("Send email: %s" % (email))
                    if not noop:
                        ses.send_email(email.sender, email.title, email.body, emailAddress, format='html')
                except Exception as e:
                    logs.warning("Email failed: %s" % email)
                    logs.warning(utils.getFormattedException())

    logs.info("Success!")
def _convertFromMongo(self, document):
    if document is None:
        return None

    if 'search_blurb' in document:
        del document['search_blurb']

    document = self._upgradeDocument(document)

    if '_id' in document and self._primary_key is not None:
        document[self._primary_key] = self._getStringFromObjectId(document['_id'])
        del document['_id']

    entityData = document.pop('entity')
    document['entity'] = {'entity_id': entityData['entity_id']}

    stamp = self._obj().dataImport(document, overflow=self._overflow)

    try:
        entity = buildEntity(entityData, mini=True)
        stamp.entity = entity
    except Exception as e:
        logs.warning("Unable to upgrade entity embedded within stamp '%s'" % (stamp.stamp_id))

    return stamp
def badge(self):
    try:
        if self._unreadCount > 0:
            return self._unreadCount
    except Exception as e:
        logs.warning('Unable to get unread count: %s' % e)
    return -1
def service_request(service, method, url, body=None, header=None, query_params=None,
                    priority='low', timeout=DEFAULT_TIMEOUT):
    if timeout is None:
        timeout = DEFAULT_TIMEOUT
    if body is None:
        body = {}
    if header is None:
        header = {}
    if query_params is None:
        query_params = {}

    if query_params != {}:
        encoded_params = urllib.urlencode(query_params)
        if url.find('?') == -1:
            url += "?%s" % encoded_params
        else:
            url += "&%s" % encoded_params

    logs.info('### called service_request. service: %s url: %s priority: %s timeout: %s' %
              (service, url, priority, timeout))

    response, content = rl_state().request(service, method, url, body, header, priority, timeout)

    if response.status > 400:
        logs.warning('service request returned an error response. status code: %s content: %s' %
                     (response.status, content))

    return response, content
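The query-string handling above amounts to appending urlencoded params with '?' or '&' depending on whether the URL already carries a query. A self-contained sketch (Python 2 urllib, placeholder URL):

import urllib

url = 'http://api.example.com/search?version=1'
query_params = {'q': 'coffee', 'limit': 10}
url += ('&' if '?' in url else '?') + urllib.urlencode(query_params)
# -> http://api.example.com/search?version=1&q=coffee&limit=10  (param order may vary)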
def ensure_index(self, key_or_list, **kwargs):
    if self._debug:
        print("Mongo 'ensure_index'")

    num_retries = 0
    max_retries = 5

    # NOTE (travis): this method should never throw an error locally if connected to
    # a non-master DB node that can't ensure_index because the conn doesn't have
    # write permissions
    while True:
        try:
            ret = self._collection.ensure_index(key_or_list, **kwargs)
            return ret
        except AutoReconnect as e:
            if not utils.is_ec2():
                return

            num_retries += 1
            if num_retries > max_retries:
                msg = "Unable to ensure_index after %d retries (%s)" % \
                      (max_retries, self._parent.__class__.__name__)
                logs.warning(msg)
                raise
            logs.info("Retrying ensure_index (%s)" % (self._parent.__class__.__name__))
            time.sleep(0.25)
def _copyInS3(self, oldKey, newKey):
    num_retries = 0
    max_retries = 5

    while True:
        try:
            conn = S3Connection(keys.aws.AWS_ACCESS_KEY_ID, keys.aws.AWS_SECRET_KEY)
            bucket = conn.lookup(self.bucket_name)

            if not self.bucket.get_key(oldKey):
                return True

            bucket.copy_key(newKey, self.bucket_name, oldKey, preserve_acl=True)
            return True
        except Exception as e:
            logs.warning('S3 Exception: %s' % e)
            num_retries += 1
            if num_retries > max_retries:
                msg = "Unable to connect to S3 after %d retries (%s)" % \
                      (max_retries, self.__class__.__name__)
                logs.warning(msg)
                raise Exception(msg)

            logs.info("Retrying (%s)" % (num_retries))
            time.sleep(0.5)
def __http(self, verb, service, user_id=None, token=None, priority='low', timeout=None, **parameters):
    """
    Makes a request to the Netflix API
    """
    self.__checkBlacklistExpiration()

    # if a user is specified, and she is in the blacklist, return None
    if user_id is not None and self.__isUserBlacklisted(user_id):
        return None

    if service.startswith('http'):
        url = service
    else:
        if user_id is None:
            url = "http://%s/%s" % (HOST, service)
        else:
            url = "http://%s/users/%s/%s" % (HOST, user_id, service)

    parameters['output'] = 'json'

    oauthRequest = oauth.OAuthRequest.from_consumer_and_token(self.__consumer,
                                                              http_url=url,
                                                              parameters=parameters,
                                                              token=token,
                                                              http_method=verb)
    oauthRequest.sign_request(self.__signature_method_hmac_sha1, self.__consumer, token)

    headers = {'Content-Type': 'application/x-www-form-urlencoded'} if verb == 'POST' else {}
    params = oauthRequest.parameters

    logs.info(url)

    if verb == 'POST':
        response, content = service_request('netflix', verb, url, body=params,
                                            header=headers, priority=priority, timeout=timeout)
    else:
        response, content = service_request('netflix', verb, url, query_params=params,
                                            header=headers, priority=priority, timeout=timeout)

    # if the response is a 401 or 403, blacklist the user until the day expires
    if user_id is not None and response.status in (401, 403):
        if self.__addToBlacklistCount(user_id):
            logs.warning('Too many 401/403 responses. User added to blacklist')

    if response.status < 300:
        return json.loads(content)
    else:
        logs.info('Failed with status code %s' % response['status'])
        try:
            failData = json.loads(content)['status']
            status = failData['status_code']
            subcode = failData.get('sub_code', None)
            message = failData['message']
        except:
            raise StampedThirdPartyError("Error parsing Netflix error response")

        # For the full list of possible status codes, see:
        # http://developer.netflix.com/docs/HTTP_Status_Codes
        if status == 401:
            raise StampedThirdPartyInvalidCredentialsError(message)
        elif status == 412 and subcode == 710:
            return True
        else:
            raise StampedThirdPartyError(message)
def pop(self, imdb=None, tmdb=None, tvdb=None, item=None):
    if imdb:
        item = self.imdb.pop(imdb, None)
    if not item and tmdb:
        item = self.tmdb.pop(tmdb, None)
    if not item and tvdb:
        item = self.tvdb.pop(tvdb, None)
    if not item:
        return None

    self._popall(item)
    try:
        self.items.remove(item)
    except KeyError:
        logs.warning("Item didn't exist in map set, collision?")
    return item
def _load_cache(self, section_id):
    if self.cache and self.cache_section_id == section_id:
        return
    if not os.path.isfile(self.cache_file):
        with open(self.cache_file, 'w') as f:
            json.dump(dict(), f)
    with open(self.cache_file, 'r') as f:
        try:
            self._cache = json.load(f)
        except Exception as e:
            logs.warning("Unable to read cache, recreating ({})".format(e))
            self._cache = dict()
    self.cache = self._cache.get(section_id, dict())
    self.cache_section_id = section_id
def _handle_request(self, method, url, data=None):
    """Stolen from trakt.core to support optional OAUTH operations

    :todo: Fix trakt
    """
    headers = {
        'Content-Type': 'application/json',
        'trakt-api-version': '2'
    }
    # self.logger.debug('%s: %s', method, url)
    headers['trakt-api-key'] = self.client_id
    if self.oauth:
        headers['Authorization'] = 'Bearer {0}'.format(self.oauth_token)
    # self.logger.debug('headers: %s', str(headers))
    # self.logger.debug('method, url :: %s, %s', method, url)
    if method == 'get':  # GETs need to pass data as params, not body
        response = requests.request(method, url, params=data, headers=headers)
    else:
        response = requests.request(method, url, data=json.dumps(data), headers=headers)
    # self.logger.debug('RESPONSE [%s] (%s): %s', method, url, str(response))
    if response.status_code in self.trakt_core.error_map:
        if response.status_code == trakt.core.errors.OAuthException.http_code:
            # OAuth token probably expired
            logs.warning(u"Trakt OAuth token invalid/expired")
            self.oauth_auth()
            return self._handle_request(method, url, data)
        raise self.trakt_core.error_map[response.status_code]()
    elif response.status_code == 204:  # HTTP no content
        return None
    json_data = json.loads(response.content.decode('UTF-8', 'ignore'))
    return json_data
def _get_guids(self, item):
    self._load_cache(str(item.librarySectionID))
    guids = []
    try:
        ts = item.updatedAt.timestamp()
    except AttributeError:
        logs.warning("Missing updatedAt timestamp for {} ({})".format(
            item.title, item.year))
        ts = 0
    try:
        if (item.guid in self.cache
                and self.cache[item.guid]['updatedAt'] >= ts):
            guids = self.cache[item.guid]['guids']
    except (KeyError, TypeError):
        logs.warning("Cache error, overwriting")
        guids = self.cache[item.guid]
        self.cache[item.guid] = {'guids': guids, 'updatedAt': ts}
        self._save_cache()
    if not guids:
        guids = [guid.id for guid in item.guids]
        if guids:
            self.cache[item.guid] = {'guids': guids, 'updatedAt': ts}
            self._save_cache()
    return guids
def weighted_sorting(self, item_list):
    def _get_non_theatrical_release(release_dates):
        # Returns earliest release date that is not theatrical
        # TODO PREDB
        types = {}
        for country in release_dates.get('results', []):
            # FIXME Look at others too?
            if country['iso_3166_1'] != 'US':
                continue
            for d in country['release_dates']:
                if d['type'] in (4, 5, 6):
                    # 4: Digital, 5: Physical, 6: TV
                    types[str(d['type'])] = datetime.datetime.strptime(
                        d['release_date'], '%Y-%m-%dT%H:%M:%S.%fZ').date()
            break

        release_date = None
        for t, d in types.items():
            if not release_date or d < release_date:
                release_date = d

        return release_date

    def _get_age_weight(days):
        if self.library_type == 'movie':
            # Everything younger than this will get 1
            min_days = 180
            # Everything older than this will get 0
            max_days = (float(self.recipe['new_library']['max_age'])
                        / 4.0 * 365.25 or 360)
        else:
            min_days = 14
            max_days = (float(self.recipe['new_library']['max_age'])
                        / 4.0 * 365.25 or 180)
        if days <= min_days:
            return 1
        elif days >= max_days:
            return 0
        else:
            return 1 - (days - min_days) / (max_days - min_days)

    total_items = len(item_list)
    weights = self.recipe['weighted_sorting']['weights']

    # TMDB details
    today = datetime.date.today()
    total_tmdb_vote = 0.0
    tmdb_votes = []
    for i, m in enumerate(item_list):
        m['original_idx'] = i + 1
        details = self.tmdb.get_details(m['tmdb_id'], self.library_type)
        if not details:
            logs.warning(u"Warning: No TMDb data for {}".format(m['title']))
            continue
        m['tmdb_popularity'] = float(details['popularity'])
        m['tmdb_vote'] = float(details['vote_average'])
        m['tmdb_vote_count'] = int(details['vote_count'])
        if self.library_type == 'movie':
            if self.recipe['weighted_sorting']['better_release_date']:
                m['release_date'] = _get_non_theatrical_release(
                    details['release_dates']) or \
                    datetime.datetime.strptime(
                        details['release_date'], '%Y-%m-%d').date()
            else:
                m['release_date'] = datetime.datetime.strptime(
                    details['release_date'], '%Y-%m-%d').date()
            item_age_td = today - m['release_date']
        elif self.library_type == 'tv':
            try:
                m['last_air_date'] = datetime.datetime.strptime(
                    details['last_air_date'], '%Y-%m-%d').date()
            except TypeError:
                m['last_air_date'] = today
            item_age_td = today - m['last_air_date']
        m['genres'] = [g['name'].lower() for g in details['genres']]
        m['age'] = item_age_td.days
        if (self.library_type == 'tv' or m['tmdb_vote_count'] > 150
                or m['age'] > 50):
            tmdb_votes.append(m['tmdb_vote'])
        total_tmdb_vote += m['tmdb_vote']
        item_list[i] = m
    tmdb_votes.sort()

    for i, m in enumerate(item_list):
        # Distribute all weights evenly from 0 to 1 (times global factor)
        # More weight means it'll go higher in the final list
        index_weight = float(total_items - i) / float(total_items)
        m['index_weight'] = index_weight * weights['index']
        if m.get('tmdb_popularity'):
            if (self.library_type == 'tv' or m.get('tmdb_vote_count') > 150
                    or m['age'] > 50):
                vote_weight = ((tmdb_votes.index(m['tmdb_vote']) + 1)
                               / float(len(tmdb_votes)))
            else:
                # Assume below average rating for new/less voted items
                vote_weight = 0.25
            age_weight = _get_age_weight(float(m['age']))

            if weights.get('random'):
                random_weight = random.random()
                m['random_weight'] = random_weight * weights['random']
            else:
                m['random_weight'] = 0.0

            m['vote_weight'] = vote_weight * weights['vote']
            m['age_weight'] = age_weight * weights['age']

            weight = (m['index_weight'] + m['vote_weight']
                      + m['age_weight'] + m['random_weight'])
            for genre, value in weights['genre_bias'].items():
                if genre.lower() in m['genres']:
                    weight *= value

            m['weight'] = weight
        else:
            m['vote_weight'] = 0.0
            m['age_weight'] = 0.0
            m['weight'] = index_weight
        item_list[i] = m

    item_list.sort(key=lambda m: m['weight'], reverse=True)

    for i, m in enumerate(item_list):
        if (i + 1) < m['original_idx']:
            net = Colors.GREEN + u'↑'
        elif (i + 1) > m['original_idx']:
            net = Colors.RED + u'↓'
        else:
            net = u' '
        net += str(abs(i + 1 - m['original_idx'])).rjust(3)
        try:
            # TODO
            logs.info(
                u"{} {:>3}: trnd:{:>3}, w_trnd:{:0<5}; vote:{}, "
                "w_vote:{:0<5}; age:{:>4}, w_age:{:0<5}; w_rnd:{:0<5}; "
                "w_cmb:{:0<5}; {} {}{}".format(
                    net, i + 1, m['original_idx'],
                    round(m['index_weight'], 3),
                    m.get('tmdb_vote', 0.0),
                    round(m['vote_weight'], 3), m.get('age', 0),
                    round(m['age_weight'], 3),
                    round(m.get('random_weight', 0), 3),
                    round(m['weight'], 3),
                    str(m['title']), str(m['year']), Colors.RESET))
        except UnicodeEncodeError:
            pass

    return item_list
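A worked example of the _get_age_weight interpolation in the movie branch, assuming max_age is 10 years (so max_days is about 913) and the 180-day floor used above; the numbers are illustrative only.

# Self-contained version of the linear age falloff (movie branch, max_age assumed to be 10)
min_days = 180
max_days = 10 / 4.0 * 365.25        # 913.125

def age_weight(days):
    if days <= min_days:
        return 1
    elif days >= max_days:
        return 0
    return 1 - (days - min_days) / (max_days - min_days)

print(age_weight(90))    # 1 -- newer than the 180-day floor
print(age_weight(546))   # ~0.50 -- roughly halfway through the window
print(age_weight(1200))  # 0 -- older than max_days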