def distill_article_interactions(session, user, data):
    """
    Dispatch a user_activity_data event to its handler.

    :param session: db session passed through to the handlers
    :param user: the user who generated the event
    :param data: dict carrying 'event', 'value' and 'article_id'
    """
    event = data['event']
    value = data['value']
    article_id = int(data['article_id'])

    log(f'event is: {event}')

    # event names are matched by substring, first match wins
    dispatch = {
        "UMR - OPEN ARTICLE": lambda: article_opened(session, article_id, user),
        "UMR - LIKE ARTICLE": lambda: article_liked(session, article_id, user, True),
        "UMR - UNLIKE ARTICLE": lambda: article_liked(session, article_id, user, False),
        "UMR - USER FEEDBACK": lambda: article_feedback(session, article_id, user, value),
    }
    for marker, handler in dispatch.items():
        if marker in event:
            handler()
            break
def article_id():
    """
    Return the article at the given URL, creating it first if necessary.

    Takes url as URL argument.
    NOTE: the url should be encoded with quote_plus (Python) and
    encodeURIComponent (Javascript)

    :return: json dict with the article id
    """
    url = request.args.get("url", "")
    if not url:
        flask.abort(400)

    try:
        found = Article.find_or_create(db_session, url)
        return json_result({"article_id": found.id})
    except Exception as e:
        # report to Sentry and the app log before answering 500
        from sentry_sdk import capture_exception
        capture_exception(e)
        zeeguu_core.log(e)
        flask.abort(500)
def create_from_post_data(cls, session, data, user):
    """
    Create and persist a UserActivityData entry from POSTed `data`.

    Keys are read defensively: a payload missing optional keys no longer
    raises KeyError (consistent with the newer variant of this method
    elsewhere in this file). `time` is None when not supplied.

    :param session: db session used for the insert
    :param data: dict with time/event/value/extra_data/article_id keys
    :param user: the user the activity belongs to
    """
    _time = data.get('time', None)
    # parse only when present; strptime(None) would raise TypeError
    time = datetime.strptime(_time, JSON_TIME_FORMAT) if _time else None

    event = data.get('event', '')
    value = data.get('value', '')
    extra_data = data.get('extra_data', '')
    # normalize the serialized empty dict to an empty string
    if extra_data == '{}':
        extra_data = ''

    article_id = None
    has_article_id = False
    if data.get('article_id', ''):
        article_id = int(data['article_id'])
        has_article_id = True

    zeeguu_core.log(
        f'{event} value[:42]: {value[:42]} extra_data[:42]: {extra_data[:42]} art_id: {article_id}')

    new_entry = UserActivityData(user, time, event, value, extra_data,
                                 has_article_id, article_id)
    session.add(new_entry)
    session.commit()

    if has_article_id:
        UserReadingSession.update_reading_session(session, event, user.id,
                                                  article_id,
                                                  current_time=time)
def create_from_post_data(cls, session, data, user):
    """
    Create and persist a UserActivityData entry from POSTed `data`.

    All keys are read with defaults, so a partial payload is tolerated;
    `time` stays None when the payload carries no timestamp.
    """
    raw_time = data.get("time", None)
    time = datetime.strptime(raw_time, JSON_TIME_FORMAT) if raw_time else None

    event = data.get("event", "")
    value = data.get("value", "")
    extra_data = data.get("extra_data", "")

    raw_article_id = data.get("article_id", None)
    has_article_id = bool(raw_article_id)
    article_id = int(raw_article_id) if raw_article_id else None

    zeeguu_core.log(
        f"{event} value[:42]: {value[:42]} extra_data[:42]: {extra_data[:42]} art_id: {article_id}"
    )

    entry = UserActivityData(user, time, event, value, extra_data,
                             has_article_id, article_id)
    session.add(entry)
    session.commit()

    # an activity tied to an article also feeds the reading-session model
    if has_article_id:
        UserReadingSession.update_reading_session(session, event, user.id,
                                                  article_id,
                                                  current_time=time)
def get_feeds_being_followed():
    """
    A user might be following multiple feeds at once.
    This endpoint returns them as a list.

    :return: a json list with feeds for which the user is registered;
    every feed in this list is a dictionary with the following info:
                id = unique id of the feed; uniquely identifies feed in other endpoints
                title = <unicode string>
                url = ...
                language = ...
                image_url = ...
    """
    feeds = []
    for registration in RSSFeedRegistration.feeds_for_user(flask.g.user):
        # one broken feed must not hide the rest of the list
        try:
            feeds.append(registration.rss_feed.as_dictionary())
        except Exception as e:
            from sentry_sdk import capture_exception
            capture_exception(e)
            zeeguu_core.log(str(e))
    return json_result(feeds)
def wrapper(*args, **kwargs):
    # Time the wrapped call and log its duration in milliseconds.
    started_at = time.time()
    result = func(*args, **kwargs)
    elapsed_ms = (time.time() - started_at) * 1000
    log(func.__name__ + ' ran for ' + "{0:.2f}".format(elapsed_ms) + 'ms')
    return result
def update_bookmark_priority(cls, db, user):
    """
    Recompute and persist the exercise priority of every bookmark of
    `user` that is fit for study.

    :param db: The connection to the database
    :param user: The user object
    """
    try:
        bookmarks_for_user = user.all_bookmarks_fit_for_study()
        fit_for_study_count = len(bookmarks_for_user)

        zeeguu_core.log(f"{fit_for_study_count} bookmarks fit for study")
        if fit_for_study_count == 0:
            return

        # tuple(0=bookmark, 1=exercise)
        bookmark_exercise_of_user = map(cls._get_exercise_of_bookmark,
                                        bookmarks_for_user)
        # tee because the map iterator is consumed twice below
        # (once for max_iterations, once for the priorities)
        b1, b2 = itertools.tee(bookmark_exercise_of_user, 2)

        # highest exercise id seen; bookmarks without exercises count as 0
        max_iterations = max(
            pair.exercise.id if pair.exercise is not None else 0
            for pair in b1)

        exercises_and_priorities = [
            cls._calculate_bookmark_priority(x, max_iterations)
            for x in b2
        ]

        # with db.session.no_autoflush: # might not be needed, but just to be safe
        for each in exercises_and_priorities:
            entry = BookmarkPriorityARTS.find_or_create(
                each.bookmark, each.priority)
            zeeguu_core.log(
                f"Updating {each.bookmark.id} with priority: {each.priority} from: {entry.priority}"
            )
            entry.priority = each.priority

            # retry a few times on concurrent-write conflicts,
            # then give up and re-raise
            max_retries = 3
            while True:
                try:
                    db.session.add(entry)
                    db.session.commit()
                    break
                except IntegrityError:
                    db.session.rollback()
                    capture_message(
                        "conflict in saving bookmark priority; will retry")
                    max_retries -= 1
                    if max_retries < 1:
                        raise

    except Exception as e:
        # top-level guard: roll back and report, never propagate
        db.session.rollback()
        capture_exception(e)
        print('Error during updating bookmark priority')
        print(e)
        print(traceback.format_exc())
def quality_bookmark(self):
    """A bookmark counts as good quality when the user starred it,
    or when it is simply not flagged as bad quality."""
    if self.starred:
        zeeguu_core.log("starred -> good quality")
        return True

    return not self.bad_quality_bookmark()
def article_search_for_user(user, count, search_terms):
    """
    Search articles via Elasticsearch; fall back to the mixed
    (database-based) search when ES fails.

    :param user: the user to personalize results for
    :param count: maximum number of results
    :param search_terms: the query string
    """
    try:
        return elastic_article_search_for_user(user, count, search_terms)
    except Exception as e:
        log(ES_DOWN_MESSAGE)
        # FIX: previously `e` was silently dropped, hiding the actual
        # reason for falling back to the mixed search
        log(str(e))
        return mixed_article_search_for_user(user, count, search_terms)
def article_opened(session, article_id, user):
    """
    Record that `user` opened the article with id `article_id`.

    Creates the UserArticle association when missing and stamps its
    `opened` time.
    """
    article = Article.query.filter_by(id=article_id).one()

    # FIX: use a single timestamp; previously datetime.now() was called
    # twice, so the create path and the update path could record two
    # slightly different times
    now = datetime.now()

    ua = UserArticle.find(user, article)
    if not ua:
        ua = UserArticle.find_or_create(session, user, article, opened=now)
    ua.opened = now

    session.add(ua)
    session.commit()
    log(f"{ua}")
def article_liked(session, article_id, user, like_value):
    """
    Record that `user` (un)liked the article and send a notification mail.

    :param like_value: True for like, False for unlike
    """
    from zeeguu_core.emailer.user_activity import send_notification_article_feedback

    article = Article.query.filter_by(id=article_id).one()

    # FIX: find() returns None when the user never opened the article,
    # which crashed with AttributeError on `ua.liked`; use find_or_create
    # like article_opened does
    ua = UserArticle.find_or_create(session, user, article)
    ua.liked = like_value
    session.add(ua)
    session.commit()
    log(f"{ua}")

    # NOTE(review): the mail always says 'Liked' even when like_value is
    # False -- presumably intentional (same mail for both events); confirm
    send_notification_article_feedback('Liked', user, article.title,
                                       article.url.as_string(), article.id)
def translate_and_bookmark(from_lang_code, to_lang_code):
    """
    @deprecated
    This should be deprecated and /get_possible_translations used instead.
    However, it is still used by the zeeguu chrome extension.

    This expects in the post parameter the following:
        - word (to translate)
        - context (surrounding paragraph of the original word)
        - url (of the origin)
        - title (of the origin page)

    /get_possible_translations has very similar behavior, only that
    it focuses on returning the possible alternative translations.

    :param from_lang_code:
    :param to_lang_code:
    :return: json with bookmark_id and translation
    """
    data = {"from_lang_code": from_lang_code, "to_lang_code": to_lang_code}

    word_str = unquote_plus(request.form['word'])
    data["word"] = word_str

    url_str = request.form.get('url', '')
    data["url"] = url_str

    title_str = request.form.get('title', '')
    data["title"] = title_str

    context_str = request.form.get('context', '')
    data["context"] = context_str

    # the url comes from elsewhere not from the reader, so we find or creat the article
    article = Article.find_or_create(db_session, url_str)
    article_id = article.id

    try:
        minimal_context, query = minimize_context(
            data["context"], data["from_lang_code"], data["word"])
        data["query"] = query
        translations = get_all_translations(data).translations
        best_guess = translations[0]["translation"]
        bookmark = Bookmark.find_or_create(db_session, flask.g.user,
                                           word_str, from_lang_code,
                                           best_guess, to_lang_code,
                                           minimal_context, url_str,
                                           title_str, article_id)
    except ValueError as e:
        zeeguu_core.log(f"minimize context failed {e}on: {context_str} x {from_lang_code} x {word_str} ")
        # FIX: `query` is only bound when minimize_context succeeds, so
        # returning it here used to raise NameError; fall back to the raw
        # context when the query was never computed
        return context_str, data.get("query", context_str)

    return json_result(dict(
        bookmark_id=bookmark.id,
        translation=best_guess))
def article_search_for_user(user, count, search_terms):
    """
    Search articles via Elasticsearch; fall back to the mixed
    (database-based) search when the ES cluster is unreachable.

    :param user: the user to personalize results for
    :param count: maximum number of results
    :param search_terms: the query string
    """
    try:
        return elastic_article_search_for_user(user, count, search_terms)
    except elasticsearch.exceptions.ConnectionError:
        log(ES_DOWN_MESSAGE)
        # FIX: was log(print(traceback.format_exc())) -- print() returns
        # None, so the log recorded "None" and the traceback went to stdout
        log(traceback.format_exc())
        return mixed_article_search_for_user(user, count, search_terms)
def article_recommendations_for_user(user, count):
    """
    Recommend articles via Elasticsearch; fall back to the mixed
    recommender when ES fails.

    FIX: the fallback used to live in a `finally:` block whose `return`
    unconditionally overrode the `return` in `try:` -- the Elasticsearch
    result was therefore always discarded. The fallback now runs only
    when ES actually fails.
    """
    try:
        return elastic_article_recommendations_for_user(user, count)
    except Exception as e:
        log(ES_DOWN_MESSAGE)
        log(str(e))
        return mixed_article_recommendations_for_user(user, count)
def sufficient_quality(art: newspaper.Article, reason_dict):
    """
    Reject articles whose HTML contains "read more"-style teaser patterns
    (a sign of an incomplete scrape); otherwise defer to the text checks.

    :param art: the downloaded newspaper article
    :param reason_dict: dict collecting the rejection reason, updated in place
    :return: False when a teaser pattern is found, otherwise the result
             of sufficient_quality_of_text
    """
    for each in html_read_more_patterns:
        # FIX: `find(...) > 0` missed a pattern sitting at position 0;
        # str.find returns -1 only when absent
        if art.html.find(each) != -1:
            zeeguu_core.log(
                f"Incomplete Article (based on HTML analysis): {art.url} contains: {each}"
            )
            _update_reason_dict(reason_dict, f'Html contains incomplete pattern: {each}')
            return False

    return sufficient_quality_of_text(art.text, art.url, reason_dict)
def get_students(self):
    """
    All users belonging to this cohort.

    Compatibility: when an invitation code is associated, users are
    matched by that code; otherwise by their cohort reference.
    """
    from zeeguu_core.model.user import User

    invitation_code = self.inv_code
    if invitation_code and len(invitation_code) > 1:
        zeeguu_core.log("we have an invitation code...")
        return User.query.filter_by(invitation_code=invitation_code).all()

    zeeguu_core.log("falling back on filtering based on cohort")
    return User.query.filter(User.cohort == self).all()
def json_serializable_dict(self, with_context=True, with_title=False):
    """
    Serialize this bookmark into a plain dict for the API.

    :param with_context: include the sentence the word appeared in
    :param with_title: look up (and include) the source article's title
    :return: dict of JSON-serializable values describing the bookmark
    """
    try:
        translation_word = self.translation.word
    except AttributeError as e:
        # defensive: some bookmarks have no translation row
        translation_word = ''
        zeeguu_core.log(
            f"Exception caught: for some reason there was no translation for {self.id}"
        )
        print(str(e))

    word_info = Word.stats(self.origin.word,
                           self.origin.language.code)

    # NOTE(review): learned_datetime computed here is never used; the
    # dict below uses SortedExerciseLog instead -- confirm intended
    learned_datetime = str(
        self.learned_time.date()) if self.learned else ''

    created_day = "today" if self.time.date() == datetime.now().date(
    ) else ''

    bookmark_title = ""
    if with_title:
        try:
            bookmark_title = self.text.article.title
        except Exception as e:
            from sentry_sdk import capture_exception
            capture_exception(e)
            print(
                f"could not find article title for bookmark with id: {self.id}"
            )

    result = dict(
        id=self.id,
        to=translation_word,
        from_lang=self.origin.language.code,
        to_lang=self.translation.language.code,
        title=bookmark_title,
        url=self.text.url.as_string(),
        origin_importance=word_info.importance,
        learned_datetime=SortedExerciseLog(
            self).str_most_recent_correct_dates(),
        origin_rank=word_info.rank if word_info.rank != 100000 else '',
        starred=self.starred if self.starred is not None else False,
        article_id=self.text.article_id if self.text.article_id else '',
        created_day=created_day,
        # human readable stuff...
        time=self.time.strftime(JSON_TIME_FORMAT))

    if self.text.article:
        result['article_title'] = self.text.article.title

    # "from" is a Python keyword, so it can't be a dict() kwarg above
    result["from"] = self.origin.word
    if with_context:
        result['context'] = self.text.content
    return result
def publishing_date(item):
    """
    Best-effort extraction of a feedparser item's publication time.

    :param item: a feedparser entry
    :return: published_parsed when present, else updated_parsed
    """
    # this used to be updated_parsed but cf the deprecation
    # warning we changed to published_parsed instead.
    try:
        return item.published_parsed
    except:
        # March 8 -- added back in updated_parsed;
        # curious if this fixes the problem in some
        # cases; to find out, we log
        # NOTE(review): `self` is referenced below but is not a
        # parameter -- this only works when the function is nested
        # inside a method (cf. the nested copy in feed_items);
        # confirm this free-standing copy is not dead code
        zeeguu_core.log(f'trying updated_parsed where published_parsed failed for {item.get("link", "")} in the context of {self.url.as_string()}')
        result = item.updated_parsed
        return result
def preferred_difficulty_estimator(self):
    """
    :return: the difficulty estimator name from the user's preferences,
             defaulting to "FleschKincaidDifficultyEstimator" when no
             preference is stored
    """
    # imported here to avoid a circular dependency
    from zeeguu_core.model.user_preference import UserPreference

    estimator = (UserPreference.get_difficulty_estimator(self)
                 or "FleschKincaidDifficultyEstimator")
    zeeguu_core.log(
        f"Difficulty estimator for user {self.id}: {estimator}")
    return estimator
def __create_new_source(cls, source):
    """
    Validate `source` against the known SourceRule sources and persist
    it as a new ExerciseSource.

    :raises KeyError: when `source` is not defined in cls.sources

    FIX: list.index raises ValueError for an unknown source, which made
    the intended log-and-KeyError branch unreachable; translate the
    ValueError into the KeyError callers expect.
    """
    try:
        resolved = cls.sources[cls.sources.index(source)]
    except ValueError:
        zeeguu_core.log(
            "ExerciseSource {0} is not defined in SourceRule".format(
                source))
        raise KeyError

    # preserved from the original: a None entry is also rejected
    if resolved is None:
        zeeguu_core.log(
            "ExerciseSource {0} is not defined in SourceRule".format(
                resolved))
        raise KeyError

    new_source = ExerciseSource(resolved)
    cls.save(new_source)
    return new_source
def wrapped_view(*args, **kwargs):
    """
    Auth wrapper: resolves the `session` request argument to a user,
    stores it on flask.g, refreshes the session's use date, then calls
    the wrapped view.

    Aborts with 401 when the session argument is missing, malformed,
    or does not match a known session.
    """
    try:
        session_id = int(flask.request.args['session'])
        zeeguu_core.log(("API CALL: " + str(view)))
    except (KeyError, ValueError):
        # FIX: was a bare `except:` which also swallowed unrelated
        # failures; only a missing or non-integer `session` argument
        # should yield 401
        flask.abort(401)

    session = Session.query.get(session_id)
    if session is None:
        flask.abort(401)

    flask.g.user = session.user
    session.update_use_date()
    zeeguu_core.db.session.add(session)
    zeeguu_core.db.session.commit()
    return view(*args, **kwargs)
def retrieve_articles_from_all_feeds():
    """
    Iterate over every registered RSS feed and download its articles.

    A failure in one feed is printed (with traceback) and does not stop
    the remaining feeds from being crawled.
    """
    all_feeds = RSSFeed.query.all()
    total = len(all_feeds)
    for counter, feed in enumerate(all_feeds, start=1):
        try:
            msg = f"*** >>>>>>>>> {feed.title} ({counter}/{total}) <<<<<<<<<< "  # .encode('utf-8')
            log("")
            log(f"{msg}")
            download_from_feed(feed, zeeguu_core.db.session)
        except Exception as e:
            traceback.print_exc()
def bookmarks_to_study(user, desired_bookmarks_count=10):
    """
    Returns a list of bookmarks with the highest priorities
    An equal amount of bookmarks from each used algorithm (ABTesting) are selected
    Otherwise, an equal amount of bookmarks is taken from each bookmark_group
    and concatenated into a list, which is then returned.
    The amount of bookmarks taken from each group can differ by 1, depending
    on whether the possible_bookmarks_to_return_count is equally dividable
    by the group count.
    """
    # all unlearned bookmarks in the user's learned language that are
    # fit for study (or starred), ordered highest priority first
    bookmarks = (Bookmark.query.
                 filter_by(user_id=user.id).
                 filter_by(learned=False).
                 join(BookmarkPriorityARTS, BookmarkPriorityARTS.bookmark_id == Bookmark.id).
                 join(UserWord, Bookmark.origin_id == UserWord.id).
                 filter(or_(Bookmark.fit_for_study == True, Bookmark.starred == True)).
                 filter(UserWord.language_id == user.learned_language_id).
                 order_by(BookmarkPriorityARTS.priority.desc()).
                 all())

    # Group the bookmarks by their used priority algorithm in lists
    bookmark_groups = ABTesting.split_bookmarks_based_on_algorithm(bookmarks)

    if len(bookmarks) == 0:
        log("zero bookmarks that match the filter")
        return []

    group_count = len(bookmark_groups)
    log(f"bookmark groups: {group_count}")
    if group_count == 0:
        return []

    # Select bookmarks from the algorithm groups
    bookmarks_to_return = []
    possible_bookmarks_to_return_count = min(desired_bookmarks_count,
                                             len(bookmarks))

    i = 0  # counter to select from different groups
    # round-robin over the groups, popping each group's highest-priority
    # bookmark, until enough were collected or everything was visited
    while possible_bookmarks_to_return_count != len(bookmarks_to_return):
        idx = i % len(bookmark_groups)
        if 0 < len(bookmark_groups[idx]):
            bookmarks_to_return.append(bookmark_groups[idx].pop(0))
        if i >= len(bookmarks):
            # no more bookmarks available...
            break
        i = i + 1

    return bookmarks_to_return
def report_exercise_outcome(exercise_outcome, exercise_source, exercise_solving_speed, bookmark_id):
    """
    In the model parlance, an exercise is an entry in a table that
    logs the performance of an exercise.
    Every such performance, has a source, and an outcome.

    :param exercise_outcome: One of: Correct, Retry, Wrong, Typo, Too easy
    :param exercise_source: has been assigned to your app by zeeguu
    :param exercise_solving_speed: in milliseconds
    :param bookmark_id: the bookmark for which the data is reported
    :return: "OK" on success, an explanatory string for known lookup
             failures, "FAIL" on any unexpected exception
    """
    try:
        bookmark = Bookmark.find(bookmark_id)
        new_source = ExerciseSource.find(exercise_source)
        # NOTE: find_or_create runs before the guards below, so an
        # outcome row may be created even when bookmark/source is missing
        new_outcome = ExerciseOutcome.find_or_create(db_session,
                                                     exercise_outcome)
        if not bookmark:
            return "could not find bookmark"
        if not new_source:
            return "could not find source"
        if not new_outcome:
            return "could not find outcome"

        exercise = Exercise(new_outcome, new_source,
                            exercise_solving_speed, datetime.now())
        bookmark.add_new_exercise(exercise)
        bookmark.update_fit_for_study(db_session)
        bookmark.update_learned_status(db_session)
        db_session.add(exercise)
        db_session.commit()

        # Update the exercise session
        from zeeguu_core.model import UserExerciseSession
        UserExerciseSession.update_exercise_session(exercise, db_session)

        zeeguu_core.log("recomputting bookmark priorities")
        BookmarkPriorityUpdater.update_bookmark_priority(
            zeeguu_core.db, flask.g.user)

        return "OK"
    except:
        traceback.print_exc()
        return "FAIL"
def get_possible_translations(from_lang_code, to_lang_code):
    """
    Returns a list of possible translations in :param to_lang_code
    for :param word in :param from_lang_code.

    You must also specify the :param context, :param url, and
    :param title of the page where the word was found.

    The context is the sentence.

    :return: json array with translations
    """
    data = {"from_lang_code": from_lang_code, "to_lang_code": to_lang_code}
    data["context"] = request.form.get('context', '')
    url = request.form.get('url', '')
    data["url"] = url

    # resolve the article: reader urls embed either an articleID or an
    # articleURL query parameter; anything else is looked up / created
    article_id = None
    if 'articleID' in url:
        article_id = url.split('articleID=')[-1]
        url = Article.query.filter_by(id=article_id).one().url.as_canonical_string()
    elif 'articleURL' in url:
        url = url.split('articleURL=')[-1]
    else:
        # the url comes from elsewhere not from the reader, so we find or creat the article
        article = Article.find_or_create(db_session, url)
        article_id = article.id

    zeeguu_core.log(f"url before being saved: {url}")

    word_str = request.form['word']
    data["word"] = word_str
    title_str = request.form.get('title', '')
    data["title"] = title_str

    zeeguu_core.log(f'translating to... {data["to_lang_code"]}')

    # shrink the context to a minimal span around the word; `query` is
    # what is actually sent to the translator services
    minimal_context, query = minimize_context(
        data["context"], data["from_lang_code"], data["word"])
    zeeguu_core.log(f"Query to translate is: {query}")
    data["query"] = query

    translations = get_all_translations(data).translations
    zeeguu_core.log(f"Got translations: {translations}")

    # translators talk about quality, but our users expect likelihood.
    # rename the key in the dictionary
    for t in translations:
        t['likelihood'] = t.pop("quality")
        t['source'] = t.pop('service_name')

    best_guess = translations[0]["translation"]

    # persist the top translation as a bookmark right away
    Bookmark.find_or_create(db_session, flask.g.user,
                            word_str, from_lang_code,
                            best_guess, to_lang_code,
                            minimal_context, url, title_str, article_id)

    return json_result(dict(translations=translations))
def retrieve_articles_from_all_feeds():
    """
    Download articles for every registered RSS feed.

    FIX: the loop body had no exception handling, so one failing feed
    (network error, parse failure) aborted the whole crawl; failures are
    now printed and skipped, consistent with the other crawler loop in
    this file.
    """
    counter = 0
    all_feeds = RSSFeed.query.all()
    all_feeds_count = len(all_feeds)
    for feed in all_feeds:
        counter += 1
        try:
            msg = f"{counter}/{all_feeds_count}: DOWNLOADING {feed.title}".encode(
                'utf-8')
            print(msg)
            zeeguu_core.log(msg)
            download_from_feed(feed, zeeguu_core.db.session)
            msg = f"{counter}/{all_feeds_count}: FINISHED DOWNLOADING {feed.title}".encode(
                'utf-8')
            print(msg)
            zeeguu_core.log(msg)
        except Exception:
            traceback.print_exc()
def update_learned_status(self, session):
    """
    To call when something happened to the bookmark, that requires
    its "learned" status to be updated.

    :param session: db session the update is added to (not committed here)
    :return: None
    """
    # FIX: renamed the local from `log` -- it shadowed the module-level
    # log() helper used elsewhere in this file
    exercise_log = SortedExerciseLog(self)
    is_learned = is_learned_based_on_exercise_outcomes(exercise_log)
    if is_learned:
        zeeguu_core.log(f"Log: {exercise_log.summary()}: bookmark {self.id} learned!")
        self.learned_time = exercise_log.last_exercise_time()
        self.learned = True
        session.add(self)
    else:
        zeeguu_core.log(f"Log: {exercise_log.summary()}: bookmark {self.id} not learned yet.")
def feed_items(self):
    """
    :return: a dictionary with info about that feed
    extracted by feedparser
    and including: title, url, content, summary, time
    """

    def publishing_date(item):
        # this used to be updated_parsed but cf the deprecation
        # warning we changed to published_parsed instead.
        try:
            return item.published_parsed
        except:
            # March 8 -- added back in updated_parsed;
            # curious if this fixes the problem in some
            # cases; to find out, we log
            zeeguu_core.log(
                f'trying updated_parsed where published_parsed failed for {item.get("link", "")} in the context of {self.url.as_string()}'
            )
            result = item.updated_parsed
            return result

    response = requests.get(self.url.as_string())
    feed_data = feedparser.parse(response.text)

    feed_items = []
    for item in feed_data.entries:
        try:
            # items missing both published_parsed and updated_parsed make
            # publishing_date raise AttributeError; such items are skipped
            new_item_data_dict = dict(
                title=item.get("title", ""),
                url=item.get("link", ""),
                content=item.get("content", ""),
                summary=item.get("summary", ""),
                published=time.strftime(
                    SIMPLE_TIME_FORMAT, publishing_date(item)))
            feed_items.append(new_item_data_dict)
        except AttributeError as e:
            zeeguu_core.log(
                f'Exception {e} while trying to retrieve {item.get("link", "")}'
            )

    return feed_items
def report_exercise_outcome(exercise_outcome, exercise_source, exercise_solving_speed, bookmark_id):
    """
    In the model parlance, an exercise is an entry in a table that
    logs the performance of an exercise.
    Every such performance, has a source, and an outcome.

    :param exercise_outcome: One of: Correct, Retry, Wrong, Typo, Too easy
    :param exercise_source: has been assigned to your app by zeeguu
    :param exercise_solving_speed: in milliseconds
    :param bookmark_id: the bookmark for which the data is reported
    :return: "OK" on success, an explanatory string otherwise
    """
    try:
        # look everything up first (same order as before: the outcome
        # find_or_create runs even when the guards below will fail)
        found_bookmark = Bookmark.find(bookmark_id)
        found_source = ExerciseSource.find(exercise_source)
        found_outcome = ExerciseOutcome.find_or_create(db_session,
                                                       exercise_outcome)

        if not found_bookmark:
            return "could not find bookmark"
        if not found_source:
            return "could not find source"
        if not found_outcome:
            return "could not find outcome"

        exercise = Exercise(found_outcome, found_source,
                            exercise_solving_speed, datetime.now())
        found_bookmark.add_new_exercise(exercise)
        found_bookmark.update_fit_for_study(db_session)
        found_bookmark.update_learned_status(db_session)
        db_session.add(exercise)
        db_session.commit()

        # Update the exercise session
        from zeeguu_core.model import UserExerciseSession
        UserExerciseSession.update_exercise_session(exercise, db_session)

        zeeguu_core.log("recomputting bookmark priorities")
        BookmarkPriorityUpdater.update_bookmark_priority(zeeguu_core.db,
                                                         flask.g.user)
        return "OK"
    except:
        traceback.print_exc()
        return "FAIL"
def get_subscribed_filters():
    """
    A user might be subscribed to multiple filters at once.
    This endpoint returns them as a list.

    :return: a json list with filters for which the user is registered;
    every filter in this list is a dictionary with the following info:
                id = unique id of the topic;
                title = <unicode string>
    """
    result = []
    for topic_filter in TopicFilter.all_for_user(flask.g.user):
        # a broken entry must not hide the rest of the list
        try:
            result.append(topic_filter.topic.as_dictionary())
        except Exception as e:
            zeeguu_core.log(str(e))
    return json_result(result)
def get_subscribed_topics():
    """
    A user might be subscribed to multiple topics at once.
    This endpoint returns them as a list.

    :return: a json list with feeds for which the user is registered;
    every feed in this list is a dictionary with the following info:
                id = unique id of the topic;
                title = <unicode string>
    """
    result = []
    for subscription in TopicSubscription.all_for_user(flask.g.user):
        # a broken entry must not hide the rest of the list
        try:
            result.append(subscription.topic.as_dictionary())
        except Exception as e:
            zeeguu_core.log(str(e))
    return json_result(result)
def bookmarks_to_study(bookmark_count):
    """
    Returns a number of <bookmark_count> bookmarks that
    are recommended for this user to study
    """
    requested = int(bookmark_count)
    user = flask.g.user
    to_study = user.bookmarks_to_study(requested)

    if not to_study:
        # Priorities may never have been computed -- they are currently
        # only recomputed after an exercise -- so recompute and retry once.
        zeeguu_core.log("recomputting bookmark priorities since there seem to be no bookmarks to study")
        BookmarkPriorityUpdater.update_bookmark_priority(zeeguu_core.db, user)
        to_study = user.bookmarks_to_study(requested)

    return json_result([b.json_serializable_dict() for b in to_study])
def get_filtered_searches():
    """
    A user might be subscribed to multiple search filters at once.
    This endpoint returns them as a list.

    :return: a json list with searches for which the user is registered;
    every search in this list is a dictionary with the following info:
                id = unique id of the topic;
                search_keywords = <unicode string>
    """
    result = []
    for search_filter in SearchFilter.all_for_user(flask.g.user):
        # a broken entry must not hide the rest of the list
        try:
            result.append(search_filter.search.as_dictionary())
        except Exception as e:
            zeeguu_core.log(str(e))
    return json_result(result)
def get_subscribed_searches():
    """
    A user might be subscribed to multiple searches at once.
    This endpoint returns them as a list.

    :return: a json list with searches for which the user is registered;
    every search in this list is a dictionary with the following info:
                id = unique id of the search;
                search_keywords = <unicode string>
    """
    result = []
    for subscription in SearchSubscription.all_for_user(flask.g.user):
        # a broken entry must not hide the rest of the list
        try:
            result.append(subscription.search.as_dictionary())
        except Exception as e:
            zeeguu_core.log(str(e))
    return json_result(result)
def unfilter_search():
    """
    A user can unsubscribe from the search with a given ID
    :return: OK / ERROR
    """
    # FIX: int('') raised an uncaught ValueError (an HTTP 500) when the
    # search_id parameter was missing or malformed; answer 400 instead,
    # consistent with the other endpoints in this file
    try:
        search_id = int(request.form.get('search_id', ''))
    except ValueError:
        flask.abort(400)

    try:
        to_delete = SearchFilter.with_search_id(search_id, flask.g.user)
        session.delete(to_delete)
        to_delete = Search.find_by_id(search_id)
        session.delete(to_delete)
        session.commit()
    except Exception as e:
        zeeguu_core.log(str(e))
        return "OOPS. SEARCH AIN'T THERE IT SEEMS (" + str(e) + ")"

    return "OK"
def article_id():
    """
    returns the article at that URL or creates an article and returns it
    takes url as URL argument
    NOTE: the url should be encoded with quote_plus (Python) and
    encodeURIComponent (Javascript)

    :return: article id
    """
    url = request.args.get('url', '')
    if not url:
        flask.abort(400)
    try:
        article = Article.find_or_create(db_session, url)
        return json_result(dict(article_id=article.id))
    except Exception as e:
        # CONSISTENCY FIX: the sibling implementation of this endpoint
        # also reports the failure to Sentry before answering 500
        from sentry_sdk import capture_exception
        capture_exception(e)
        zeeguu_core.log(e)
        flask.abort(500)
def get_next_results(data, exclude_services=[], exclude_results=[], number_of_results=-1):
    """
    Fetch translations for data["query"] from the multiplexed translator
    services and merge them into one TranslationResponse.

    NOTE(review): mutable default arguments ([]) are shared across
    calls -- safe only while no caller or callee mutates them; confirm.

    :param data: dict with from_lang_code, to_lang_code and query
    :param exclude_services: service names to skip
    :param exclude_results: lowercased translations to drop (already shown)
    :param number_of_results: -1 for all results, 1 for only the top one
    :return: a TranslationResponse with merged, filtered translations
    """
    translator_data = {
        "source_language": data["from_lang_code"],
        "target_language": data["to_lang_code"],
        "query": data["query"]
    }
    # same-language English queries go to the word-definitions mux,
    # everything else to the translators mux
    api_mux = None
    if data["from_lang_code"] == data["to_lang_code"] == "en":
        api_mux = api_mux_worddefs
    else:
        api_mux = api_mux_translators

    if number_of_results == 1:
        logger.debug("Getting only top result")
        translator_results = api_mux.get_next_results(
            translator_data, number_of_results=1)
    else:
        logger.debug("Getting all results")
        translator_results = api_mux.get_next_results(
            translator_data,
            number_of_results=-1,
            exclude_services=exclude_services)
    zeeguu_core.log(f"Got results get_next_results: {translator_results}")
    json_translator_results = [(x, y.to_json()) for x, y in translator_results]
    logger.debug("get_next_results Zeeguu-API - Got results: %s" % json_translator_results)
    logger.debug("get_next_results - exclude_services %s" % exclude_services)

    # Returning data: [('GoogleTranslateWithContext',
    # <python_translators.translation_response.TranslationResponse>), ...]
    translations = []
    for service_name, translation in translator_results:
        if translation is None:
            continue
        lower_translation = translation.translations[0]["translation"].lower()
        if lower_translation in exclude_results:
            # Translation already exists fetched by get_top_translation
            continue
        translations = merge_translations(translations,
                                          translation.translations)

    translations = filter_empty_translations(translations)

    if not MULTI_LANG_TRANSLATOR_AB_TESTING:
        # Disabling order by quality when A/B testing is enabled
        translations = order_by_quality(translations,
                                        translator_data["query"])

    zeeguu_core.log(f"Translations get_next_results: {translations}")
    response = TranslationResponse(translations=translations)
    zeeguu_core.log(f"Returning response get_next_results: {response}")
    return response