def get(self):
    """Handles GET requests.

    Renders, as JSON, summaries of all explorations that the logged-in
    user can edit. Raises PageNotFoundException for anonymous users.
    """
    if self.user_id is None:
        raise self.PageNotFoundException

    editable_exp_summaries = (
        exp_services.get_at_least_editable_exploration_summaries(
            self.user_id))

    def _get_intro_card_color(category):
        # Fall back to the default color for categories without an
        # explicit color mapping.
        return (
            feconf.CATEGORIES_TO_COLORS[category]
            if category in feconf.CATEGORIES_TO_COLORS
            else feconf.DEFAULT_COLOR)

    self.values.update({
        'explorations_list': [{
            'id': exp_summary.id,
            'title': exp_summary.title,
            'category': exp_summary.category,
            'objective': exp_summary.objective,
            'language_code': exp_summary.language_code,
            # Timestamps are sent to the frontend as milliseconds since
            # the Epoch.
            'last_updated': utils.get_time_in_millisecs(
                exp_summary.exploration_model_last_updated),
            'created_on': utils.get_time_in_millisecs(
                exp_summary.exploration_model_created_on),
            'status': exp_summary.status,
            'community_owned': exp_summary.community_owned,
            # All summaries returned by the service above are at least
            # editable by the current user.
            'is_editable': True,
            'thumbnail_image_url': (
                '/images/gallery/exploration_background_%s_small.png' %
                _get_intro_card_color(exp_summary.category)),
        } for exp_summary in editable_exp_summaries.values()],
    })
    self.render_json(self.values)
def get(self):
    """Handles GET requests.

    Renders, as JSON, summaries (including feedback-thread counts) of the
    explorations the logged-in user is subscribed to, sorted so that
    explorations with more open feedback threads, then more recently
    updated ones, come first. Raises PageNotFoundException for anonymous
    users.
    """
    if self.user_id is None:
        raise self.PageNotFoundException

    subscribed_summaries = (
        exp_services.get_exploration_summaries_matching_ids(
            subscription_services.get_exploration_ids_subscribed_to(
                self.user_id)))

    explorations_list = []
    for exp_summary in subscribed_summaries:
        # Ids that no longer resolve to a summary yield None; skip them.
        if exp_summary is None:
            continue

        feedback_thread_analytics = feedback_services.get_thread_analytics(
            exp_summary.id)
        explorations_list.append({
            'id': exp_summary.id,
            'title': exp_summary.title,
            'category': exp_summary.category,
            'objective': exp_summary.objective,
            'language_code': exp_summary.language_code,
            # Timestamps are sent to the frontend as milliseconds since
            # the Epoch.
            'last_updated': utils.get_time_in_millisecs(
                exp_summary.exploration_model_last_updated),
            'created_on': utils.get_time_in_millisecs(
                exp_summary.exploration_model_created_on),
            'status': exp_summary.status,
            'community_owned': exp_summary.community_owned,
            'is_editable': True,
            'thumbnail_icon_url': (
                utils.get_thumbnail_icon_url_for_category(
                    exp_summary.category)),
            'thumbnail_bg_color': utils.get_hex_color_for_category(
                exp_summary.category),
            'ratings': exp_summary.ratings,
            'num_open_threads': (
                feedback_thread_analytics.num_open_threads),
            'num_total_threads': (
                feedback_thread_analytics.num_total_threads),
        })

    # Sort by number of open feedback threads, then by recency of the
    # last update, both descending.
    explorations_list = sorted(
        explorations_list,
        key=lambda x: (x['num_open_threads'], x['last_updated']),
        reverse=True)

    self.values.update({
        'explorations_list': explorations_list,
    })
    self.render_json(self.values)
def map(item):
    """Mapper that emits notification dicts, keyed by user-and-queue-time,
    for each of the user's subscribed activities and feedback threads.

    `item` is a subscriptions model providing `activity_ids` and
    `feedback_thread_ids` for a single user.
    """
    user_id = item.id
    job_queued_msec = RecentUpdatesMRJobManager._get_job_queued_msec()
    # The reducer key encodes both the user and the time the job was
    # queued, so results from different job runs do not collide.
    reducer_key = '%s@%s' % (user_id, job_queued_msec)

    activity_ids_list = item.activity_ids
    feedback_thread_ids_list = item.feedback_thread_ids

    # Deleted explorations are included so that a deletion commit can
    # still be surfaced as a notification.
    activities = exp_models.ExplorationModel.get_multi(
        activity_ids_list, include_deleted=True)
    for ind, activity in enumerate(activities):
        if activity is None:
            logging.error(
                'Could not find activity %s' % activity_ids_list[ind])
            continue

        metadata_obj = exp_models.ExplorationModel.get_snapshots_metadata(
            activity.id, [activity.version], allow_deleted=True)[0]
        yield (reducer_key, {
            'type': feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
            'activity_id': activity.id,
            'activity_title': activity.title,
            'author_id': metadata_obj['committer_id'],
            'last_updated_ms': utils.get_time_in_millisecs(
                activity.last_updated),
            # For deleted activities, the real commit message is replaced
            # by a canonical 'deleted' message.
            'subject': (
                feconf.COMMIT_MESSAGE_EXPLORATION_DELETED
                if activity.deleted else metadata_obj['commit_message']
            ),
        })

        # If the user subscribes to this activity, he/she is automatically
        # subscribed to all feedback threads for this activity.
        if not activity.deleted:
            threads = feedback_services.get_threadlist(activity.id)
            for thread in threads:
                if thread['thread_id'] not in feedback_thread_ids_list:
                    feedback_thread_ids_list.append(thread['thread_id'])

    for feedback_thread_id in feedback_thread_ids_list:
        # Only the most recent message of each thread produces a
        # notification.
        last_message = (
            feedback_models.FeedbackMessageModel.get_most_recent_message(
                feedback_thread_id))

        yield (reducer_key, {
            'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
            'activity_id': last_message.exploration_id,
            'activity_title': exp_models.ExplorationModel.get_by_id(
                last_message.exploration_id).title,
            'author_id': last_message.author_id,
            'last_updated_ms': utils.get_time_in_millisecs(
                last_message.created_on),
            'subject': last_message.get_thread_subject(),
        })
def get(self):
    """Handles GET requests.

    Renders, as JSON, summaries (including feedback-thread counts) of the
    explorations the logged-in user is subscribed to, sorted by open
    thread count and then recency, both descending. Raises
    PageNotFoundException for anonymous users.
    """
    if self.user_id is None:
        raise self.PageNotFoundException

    subscribed_summaries = exp_services.get_exploration_summaries_matching_ids(
        subscription_services.get_activity_ids_subscribed_to(self.user_id)
    )

    def _get_intro_card_color(category):
        # Fall back to the default color for categories without an
        # explicit color mapping.
        return (
            feconf.CATEGORIES_TO_COLORS[category]
            if category in feconf.CATEGORIES_TO_COLORS
            else feconf.DEFAULT_COLOR
        )

    explorations_list = []
    for exp_summary in subscribed_summaries:
        # Ids that no longer resolve to a summary yield None; skip them.
        if exp_summary is None:
            continue

        feedback_thread_analytics = feedback_services.get_thread_analytics(exp_summary.id)
        explorations_list.append(
            {
                "id": exp_summary.id,
                "title": exp_summary.title,
                "category": exp_summary.category,
                "objective": exp_summary.objective,
                "language_code": exp_summary.language_code,
                # Timestamps are sent to the frontend as milliseconds
                # since the Epoch.
                "last_updated": utils.get_time_in_millisecs(exp_summary.exploration_model_last_updated),
                "created_on": utils.get_time_in_millisecs(exp_summary.exploration_model_created_on),
                "status": exp_summary.status,
                "community_owned": exp_summary.community_owned,
                "is_editable": True,
                "thumbnail_image_url": (
                    "/images/gallery/exploration_background_%s_small.png"
                    % _get_intro_card_color(exp_summary.category)
                ),
                "ratings": exp_summary.ratings,
                # NOTE(review): analytics is indexed like a dict here,
                # whereas other handlers use attribute access
                # (.num_open_threads) — confirm which shape
                # get_thread_analytics returns in this version.
                "num_open_threads": (feedback_thread_analytics["num_open_threads"]),
                "num_total_threads": (feedback_thread_analytics["num_total_threads"]),
            }
        )

    # Sort by open-thread count, then by recency, both descending.
    explorations_list = sorted(
        explorations_list, key=lambda x: (x["num_open_threads"], x["last_updated"]), reverse=True
    )

    self.values.update({"explorations_list": explorations_list})
    self.render_json(self.values)
def get_displayable_exp_summary_dicts(exploration_summaries):
    """Given a list of exploration summary domain objects, returns a list,
    with the same number of elements, of the corresponding human-readable
    exploration summary dicts.

    This assumes that all the exploration summary domain objects passed in
    are valid (i.e., none of them are None).

    Args:
        exploration_summaries: list of exploration summary domain objects.

    Returns:
        list(dict). One summary dict per input summary, including view
        counts fetched in bulk from the statistics aggregator.
    """
    exploration_ids = [
        exploration_summary.id
        for exploration_summary in exploration_summaries]

    # View counts are fetched in one batched call, indexed in the same
    # order as exploration_ids.
    view_counts = (
        stats_jobs_continuous.StatisticsAggregator.get_views_multi(
            exploration_ids))
    displayable_exp_summaries = []

    for ind, exploration_summary in enumerate(exploration_summaries):
        # NOTE(review): falsy summaries are skipped here, which would make
        # the output shorter than the input, contradicting the docstring's
        # 'same number of elements' claim — confirm the intended contract.
        if not exploration_summary:
            continue

        summary_dict = {
            'id': exploration_summary.id,
            'title': exploration_summary.title,
            'activity_type': feconf.ACTIVITY_TYPE_EXPLORATION,
            'category': exploration_summary.category,
            'created_on_msec': utils.get_time_in_millisecs(
                exploration_summary.exploration_model_created_on),
            'objective': exploration_summary.objective,
            'language_code': exploration_summary.language_code,
            'last_updated_msec': utils.get_time_in_millisecs(
                exploration_summary.exploration_model_last_updated
            ),
            'human_readable_contributors_summary': (
                get_human_readable_contributors_summary(
                    exploration_summary.contributors_summary)
            ),
            'status': exploration_summary.status,
            'ratings': exploration_summary.ratings,
            'community_owned': exploration_summary.community_owned,
            'tags': exploration_summary.tags,
            'thumbnail_icon_url': utils.get_thumbnail_icon_url_for_category(
                exploration_summary.category),
            'thumbnail_bg_color': utils.get_hex_color_for_category(
                exploration_summary.category),
            'num_views': view_counts[ind],
        }

        displayable_exp_summaries.append(summary_dict)

    return displayable_exp_summaries
def get(self):
    """Handles GET requests.

    Expects a 'stringified_exp_ids' query parameter containing a JSON
    list of exploration id strings, and renders, as JSON, the matching
    summaries (None for ids with no summary). Raises
    PageNotFoundException if the parameter is missing or malformed.
    """
    try:
        exp_ids = json.loads(self.request.get('stringified_exp_ids'))
    except Exception:
        # Any parse failure (missing param, invalid JSON) is treated as
        # a bad request and surfaced as a 404.
        raise self.PageNotFoundException

    # Use a generator rather than building a throwaway list for all().
    if not isinstance(exp_ids, list) or not all(
            isinstance(exp_id, basestring) for exp_id in exp_ids):
        raise self.PageNotFoundException

    exp_summaries = exp_services.get_exploration_summaries_matching_ids(
        exp_ids)

    self.values.update({
        # Preserve positional correspondence with the requested ids:
        # unknown ids map to None.
        'summaries': [(None if exp_summary is None else {
            'id': exp_summary.id,
            'title': exp_summary.title,
            'category': exp_summary.category,
            'objective': exp_summary.objective,
            'language_code': exp_summary.language_code,
            'last_updated': utils.get_time_in_millisecs(
                exp_summary.exploration_model_last_updated),
            'status': exp_summary.status,
            'community_owned': exp_summary.community_owned,
            'thumbnail_image_url': exp_summary.thumbnail_image_url,
        }) for exp_summary in exp_summaries]
    })
    self.render_json(self.values)
def get(self): """Handles GET requests.""" # TODO(sll): Figure out what to do about explorations in categories # other than those explicitly listed. query_string = self.request.get('q') search_cursor = self.request.get('cursor', None) exp_summaries_list, search_cursor = ( exp_services.get_exploration_summaries_matching_query( query_string, cursor=search_cursor)) # TODO(msl): Store 'is_editable' in exploration summary to avoid O(n) # individual lookups. Note that this will depend on user_id. explorations_list = [{ 'id': exp_summary.id, 'title': exp_summary.title, 'category': exp_summary.category, 'objective': exp_summary.objective, 'language_code': exp_summary.language_code, 'last_updated': utils.get_time_in_millisecs( exp_summary.exploration_model_last_updated), 'status': exp_summary.status, 'community_owned': exp_summary.community_owned, 'thumbnail_image_url': exp_summary.thumbnail_image_url, 'is_editable': exp_services.is_exp_summary_editable( exp_summary, user_id=self.user_id), 'ratings': exp_summary.ratings } for exp_summary in exp_summaries_list] if len(explorations_list) == feconf.DEFAULT_QUERY_LIMIT: logging.error( '%s explorations were fetched to load the gallery page. ' 'You may be running up against the default query limits.' % feconf.DEFAULT_QUERY_LIMIT) preferred_language_codes = [feconf.DEFAULT_LANGUAGE_CODE] if self.user_id: user_settings = user_services.get_user_settings(self.user_id) preferred_language_codes = user_settings.preferred_language_codes self.values.update({ 'explorations_list': explorations_list, 'preferred_language_codes': preferred_language_codes, 'search_cursor': search_cursor, }) self.render_json(self.values)
def map(item):
    """Mapper that deletes successful mapreduce state and shard-state
    models that predate the job's max-start-time cutoff, yielding a
    (counter_name, 1) pair per item for aggregation.
    """
    max_start_time_msec = JobCleanupManager.get_mapper_param(MAPPER_PARAM_MAX_START_TIME_MSEC)

    if isinstance(item, mapreduce_model.MapreduceState):
        # Only successful, sufficiently old MR states are deleted.
        if item.result_status == "success" and utils.get_time_in_millisecs(item.start_time) < max_start_time_msec:
            item.delete()
            yield ("mr_state_deleted", 1)
        else:
            yield ("mr_state_remaining", 1)

    if isinstance(item, mapreduce_model.ShardState):
        # Shard states use update_time (not start_time) for the age check.
        if item.result_status == "success" and utils.get_time_in_millisecs(item.update_time) < max_start_time_msec:
            item.delete()
            yield ("shard_state_deleted", 1)
        else:
            yield ("shard_state_remaining", 1)
def test_basic_computation(self):
    """Creating an exploration produces a single 'exploration created'
    notification for its author once the aggregator runs.
    """
    with self.swap(
            jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
            self.ALL_CONTINUOUS_COMPUTATION_MANAGERS_FOR_TESTS):
        EXP_ID = 'eid'
        EXP_TITLE = 'Title'
        USER_ID = 'user_id'

        self.save_new_valid_exploration(
            EXP_ID, USER_ID, title=EXP_TITLE, category='Category')
        # Capture the expected timestamp before running the computation.
        expected_last_updated_ms = utils.get_time_in_millisecs(
            exp_services.get_exploration_by_id(EXP_ID).last_updated)

        ModifiedRecentUpdatesAggregator.start_computation()
        # Exactly one MR job should have been enqueued.
        self.assertEqual(
            self.count_jobs_in_taskqueue(
                queue_name=taskqueue_services.QUEUE_NAME_DEFAULT), 1)
        self.process_and_flush_pending_tasks()

        self.assertEqual(
            ModifiedRecentUpdatesAggregator.get_recent_notifications(
                USER_ID)[1],
            [self._get_expected_exploration_created_dict(
                USER_ID, EXP_ID, EXP_TITLE, expected_last_updated_ms)])
def get_snapshots_metadata(cls, model_instance_id, version_numbers, allow_deleted=False):
    """Returns a list of dicts, each representing a model snapshot.

    One dict is returned for each version number in the list of version
    numbers requested. If any of the version numbers does not exist, an
    error is raised.

    If `allow_deleted` is False, an error is raised if the current model
    has been deleted.

    Args:
        model_instance_id: id of the model whose snapshots are requested.
        version_numbers: list of version numbers to fetch metadata for.
        allow_deleted: whether to permit fetching snapshots of a deleted
            model.

    Returns:
        list(dict). One metadata dict per requested version, in the same
        order, with committer, commit message/cmds/type, version number
        and creation time in milliseconds.

    Raises:
        Exception: a requested version number has no snapshot.
    """
    if not allow_deleted:
        # Raises if the current model instance is marked deleted.
        cls.get(model_instance_id)._require_not_marked_deleted()

    snapshot_ids = [cls._get_snapshot_id(model_instance_id, version_number) for version_number in version_numbers]

    # Fetch all snapshot metadata entities in a single batched call.
    metadata_keys = [ndb.Key(cls.SNAPSHOT_METADATA_CLASS, snapshot_id) for snapshot_id in snapshot_ids]
    returned_models = ndb.get_multi(metadata_keys)

    for ind, model in enumerate(returned_models):
        if model is None:
            raise Exception(
                "Invalid version number %s for model %s with id %s"
                % (version_numbers[ind], cls.__name__, model_instance_id)
            )

    return [
        {
            "committer_id": model.committer_id,
            "commit_message": model.commit_message,
            "commit_cmds": model.commit_cmds,
            "commit_type": model.commit_type,
            "version_number": version_numbers[ind],
            "created_on_ms": utils.get_time_in_millisecs(model.created_on),
        }
        for (ind, model) in enumerate(returned_models)
    ]
def test_basic_computation_with_an_update_after_creation(self):
    """A commit by another user shows up in the original author's recent
    updates, attributed to the committer.
    """
    with self.swap(
            jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
            self.ALL_CONTINUOUS_COMPUTATION_MANAGERS_FOR_TESTS):
        EXP_ID = 'eid'
        EXP_TITLE = 'Title'
        USER_ID = 'user_id'
        ANOTHER_USER_ID = 'another_user_id'

        self.save_new_valid_exploration(
            EXP_ID, USER_ID, title=EXP_TITLE, category='Category')

        # Another user makes a commit; this, too, shows up in the
        # original user's dashboard.
        exp_services.update_exploration(
            ANOTHER_USER_ID, EXP_ID, [], 'Update exploration')
        # Capture the expected timestamp before running the computation.
        expected_last_updated_ms = utils.get_time_in_millisecs(
            exp_services.get_exploration_by_id(EXP_ID).last_updated)

        ModifiedRecentUpdatesAggregator.start_computation()
        # Exactly one MR job should have been enqueued.
        self.assertEqual(
            self.count_jobs_in_taskqueue(
                queue_name=taskqueue_services.QUEUE_NAME_DEFAULT), 1)
        self.process_and_flush_pending_tasks()

        recent_updates = (
            ModifiedRecentUpdatesAggregator.get_recent_updates(USER_ID)[1])
        self.assertEqual([{
            'type': feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
            'last_updated_ms': expected_last_updated_ms,
            'activity_id': EXP_ID,
            'activity_title': EXP_TITLE,
            'author_id': ANOTHER_USER_ID,
            'subject': 'Update exploration',
        }], recent_updates)
def get(self):
    """Handles GET requests.

    Runs an exploration search for the 'q' query parameter (optionally
    continuing from a 'cursor') and renders, as JSON, the matching
    exploration summaries (with intro-card thumbnail URLs), the user's
    preferred language codes, and the next search cursor.
    """
    # TODO(sll): Figure out what to do about explorations in categories
    # other than those explicitly listed.

    # NOTE(review): computed but not used in this handler; possibly left
    # over from a previous version.
    language_codes_to_short_descs = {
        lc['code']: utils.get_short_language_description(lc['description'])
        for lc in feconf.ALL_LANGUAGE_CODES
    }

    query_string = self.request.get('q')

    # Search results can be paginated via a cursor token.
    search_cursor = self.request.get('cursor', None)

    exp_summaries_list, search_cursor = (
        exp_services.get_exploration_summaries_matching_query(
            query_string, cursor=search_cursor))

    def _get_intro_card_color(category):
        # Fall back to the default color for categories without an
        # explicit color mapping.
        return (
            feconf.CATEGORIES_TO_COLORS[category]
            if category in feconf.CATEGORIES_TO_COLORS
            else feconf.DEFAULT_COLOR)

    # TODO(msl): Store 'is_editable' in exploration summary to avoid O(n)
    # individual lookups. Note that this will depend on user_id.
    explorations_list = [{
        'id': exp_summary.id,
        'title': exp_summary.title,
        'category': exp_summary.category,
        'objective': exp_summary.objective,
        'language_code': exp_summary.language_code,
        'last_updated': utils.get_time_in_millisecs(
            exp_summary.exploration_model_last_updated),
        'status': exp_summary.status,
        'community_owned': exp_summary.community_owned,
        'thumbnail_image_url': (
            '/images/gallery/exploration_background_%s_small.png' %
            _get_intro_card_color(exp_summary.category)),
        'is_editable': exp_services.is_exp_summary_editable(
            exp_summary, user_id=self.user_id)
    } for exp_summary in exp_summaries_list]

    # A full page of results may indicate truncation at the query limit.
    if len(explorations_list) == feconf.DEFAULT_QUERY_LIMIT:
        logging.error(
            '%s explorations were fetched to load the gallery page. '
            'You may be running up against the default query limits.'
            % feconf.DEFAULT_QUERY_LIMIT)

    preferred_language_codes = [feconf.DEFAULT_LANGUAGE_CODE]
    if self.user_id:
        user_settings = user_services.get_user_settings(self.user_id)
        preferred_language_codes = user_settings.preferred_language_codes

    self.values.update({
        'explorations_list': explorations_list,
        'preferred_language_codes': preferred_language_codes,
        'search_cursor': search_cursor,
    })
    self.render_json(self.values)
def test_making_feedback_thread_does_not_subscribe_to_exploration(self):
    """Starting a feedback thread notifies the thread author of the
    thread only, while the exploration author also sees their own
    creation commit.
    """
    with self._get_test_context():
        self.signup(USER_A_EMAIL, USER_A_USERNAME)
        user_a_id = self.get_user_id_from_email(USER_A_EMAIL)
        self.signup(USER_B_EMAIL, USER_B_USERNAME)
        user_b_id = self.get_user_id_from_email(USER_B_EMAIL)

        # User A creates an exploration.
        self.save_new_valid_exploration(
            EXP_ID, user_a_id, title=EXP_TITLE, category='Category')
        exp_last_updated_ms = (
            self._get_most_recent_exp_snapshot_created_on_ms(EXP_ID))

        # User B starts a feedback thread.
        feedback_services.create_thread(
            EXP_ID, None, user_b_id, FEEDBACK_THREAD_SUBJECT, 'text')
        thread_id = feedback_services.get_all_threads(
            EXP_ID, False)[0].get_thread_id()
        message = feedback_services.get_messages(
            EXP_ID, thread_id)[0]

        ModifiedRecentUpdatesAggregator.start_computation()
        # Exactly one MR job should have been enqueued.
        self.assertEqual(
            self.count_jobs_in_taskqueue(
                queue_name=taskqueue_services.QUEUE_NAME_DEFAULT), 1)
        self.process_and_flush_pending_tasks()

        recent_notifications_for_user_a = (
            ModifiedRecentUpdatesAggregator.get_recent_notifications(
                user_a_id)[1])
        recent_notifications_for_user_b = (
            ModifiedRecentUpdatesAggregator.get_recent_notifications(
                user_b_id)[1])

        expected_thread_notification = {
            'activity_id': EXP_ID,
            'activity_title': EXP_TITLE,
            'author_id': user_b_id,
            'last_updated_ms': utils.get_time_in_millisecs(
                message.created_on),
            'subject': FEEDBACK_THREAD_SUBJECT,
            'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
        }

        expected_creation_notification = (
            self._get_expected_activity_created_dict(
                user_a_id, EXP_ID, EXP_TITLE, 'exploration',
                feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
                exp_last_updated_ms))

        # User A sees A's commit and B's feedback thread.
        self.assertEqual(recent_notifications_for_user_a, [
            expected_thread_notification,
            expected_creation_notification
        ])
        # User B sees only her feedback thread, but no commits.
        self.assertEqual(recent_notifications_for_user_b, [
            expected_thread_notification,
        ])
def test_multiple_exploration_commits_and_feedback_messages(self):
    """Commits to two explorations plus a feedback message all appear in
    the editor's notifications, newest first.
    """
    with self._get_test_context():
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)

        # User creates an exploration.
        self.save_new_valid_exploration(
            EXP_1_ID, editor_id, title=EXP_1_TITLE,
            category='Category')

        exp1_last_updated_ms = (
            self._get_most_recent_exp_snapshot_created_on_ms(EXP_1_ID))

        # User gives feedback on it.
        feedback_services.create_thread(
            EXP_1_ID, None, editor_id, FEEDBACK_THREAD_SUBJECT,
            'text')
        thread_id = feedback_services.get_all_threads(
            EXP_1_ID, False)[0].get_thread_id()
        message = feedback_services.get_messages(EXP_1_ID, thread_id)[0]

        # User creates another exploration.
        self.save_new_valid_exploration(
            EXP_2_ID, editor_id, title=EXP_2_TITLE,
            category='Category')
        exp2_last_updated_ms = (
            self._get_most_recent_exp_snapshot_created_on_ms(EXP_2_ID))

        ModifiedRecentUpdatesAggregator.start_computation()
        # Exactly one MR job should have been enqueued.
        self.assertEqual(
            self.count_jobs_in_taskqueue(
                queue_name=taskqueue_services.QUEUE_NAME_DEFAULT), 1)
        self.process_and_flush_pending_tasks()

        recent_notifications = (
            ModifiedRecentUpdatesAggregator.get_recent_notifications(
                editor_id)[1])

        # Expected order: exp 2 creation, feedback message, exp 1
        # creation.
        self.assertEqual([(
            self._get_expected_activity_created_dict(
                editor_id, EXP_2_ID, EXP_2_TITLE, 'exploration',
                feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
                exp2_last_updated_ms)
        ), {
            'activity_id': EXP_1_ID,
            'activity_title': EXP_1_TITLE,
            'author_id': editor_id,
            'last_updated_ms': utils.get_time_in_millisecs(
                message.created_on),
            'subject': FEEDBACK_THREAD_SUBJECT,
            'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
        }, (
            self._get_expected_activity_created_dict(
                editor_id, EXP_1_ID, EXP_1_TITLE, 'exploration',
                feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
                exp1_last_updated_ms)
        )], recent_notifications)
def _get_thread_dict_from_model_instance(thread):
    """Converts a feedback thread model instance into a plain dict."""
    author_id = thread.original_author_id
    author_username = (
        user_services.get_username(author_id) if author_id else None)
    return {
        'last_updated': utils.get_time_in_millisecs(thread.last_updated),
        'original_author_username': author_username,
        'state_name': thread.state_name,
        'status': thread.status,
        'subject': thread.subject,
        'summary': thread.summary,
        'thread_id': get_thread_id_from_full_thread_id(thread.id),
    }
def get_statistics(cls, exploration_id, exploration_version):
    """Gets the statistics for the specified exploration and version.

    Args:
        exploration_id: str. The id of the exploration to get statistics
            for.
        exploration_version: str. Which version of the exploration to
            get statistics for. This can be a version number, the string
            'all', or the string 'none'.

    Returns:
        dict. The keys of the dict are:
            'start_exploration_count': # of times exploration was
                started.
            'complete_exploration_count': # of times exploration was
                completed.
            'state_hit_counts': a dict containing the hit counts for the
                states in the exploration. It is formatted as follows:
                {
                    state_name: {
                        'first_entry_count': # of sessions which hit
                            this state.
                        'total_entry_count': # of total hits for this
                            state.
                        'no_answer_count': # of hits with no answer for
                            this state.
                    }
                }
    """
    num_starts = 0
    num_completions = 0
    state_hit_counts = {}
    # Remains None if no batch (MR) model exists for this exploration.
    last_updated = None

    # Batch-computed statistics from the MapReduce job.
    entity_id = stats_models.ExplorationAnnotationsModel.get_entity_id(
        exploration_id, exploration_version)
    mr_model = stats_models.ExplorationAnnotationsModel.get(
        entity_id, strict=False)
    if mr_model is not None:
        num_starts += mr_model.num_starts
        num_completions += mr_model.num_completions
        state_hit_counts = mr_model.state_hit_counts
        last_updated = utils.get_time_in_millisecs(mr_model.last_updated)

    # Add in counts from the realtime layer, which covers events that
    # arrived since the last batch run.
    realtime_model = cls._get_realtime_datastore_class().get(
        cls.get_active_realtime_layer_id(exploration_id), strict=False)
    if realtime_model is not None:
        num_starts += realtime_model.num_starts
        num_completions += realtime_model.num_completions

    return {
        'start_exploration_count': num_starts,
        'complete_exploration_count': num_completions,
        'state_hit_counts': state_hit_counts,
        'last_updated': last_updated,
    }
def map(item):
    """Mapper that emits notification dicts, keyed by user-and-queue-time,
    for the user's subscribed explorations, collections and feedback
    threads.

    `item` is a subscriptions model providing `activity_ids`,
    `collection_ids` and `feedback_thread_ids` for a single user.
    """
    user_id = item.id
    job_queued_msec = RecentUpdatesMRJobManager._get_job_queued_msec()
    # The reducer key encodes both the user and the time the job was
    # queued, so results from different job runs do not collide.
    reducer_key = "%s@%s" % (user_id, job_queued_msec)

    exploration_ids_list = item.activity_ids
    collection_ids_list = item.collection_ids
    feedback_thread_ids_list = item.feedback_thread_ids

    (
        most_recent_activity_commits,
        tracked_exp_models_for_feedback,
    ) = RecentUpdatesMRJobManager._get_most_recent_activity_commits(
        exp_models.ExplorationModel,
        exploration_ids_list,
        "exploration",
        feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
        feconf.COMMIT_MESSAGE_EXPLORATION_DELETED,
    )

    # Subscribing to an exploration implies subscribing to all of its
    # feedback threads.
    for exp_model in tracked_exp_models_for_feedback:
        threads = feedback_services.get_threadlist(exp_model.id)
        for thread in threads:
            if thread["thread_id"] not in feedback_thread_ids_list:
                feedback_thread_ids_list.append(thread["thread_id"])

    # TODO(bhenning): Implement a solution to having feedback threads for
    # collections.
    most_recent_activity_commits += (
        RecentUpdatesMRJobManager._get_most_recent_activity_commits(
            collection_models.CollectionModel,
            collection_ids_list,
            "collection",
            feconf.UPDATE_TYPE_COLLECTION_COMMIT,
            feconf.COMMIT_MESSAGE_COLLECTION_DELETED,
        )
    )[0]

    for recent_activity_commit_dict in most_recent_activity_commits:
        yield (reducer_key, recent_activity_commit_dict)

    for feedback_thread_id in feedback_thread_ids_list:
        # Only the most recent message of each thread produces a
        # notification.
        last_message = feedback_models.FeedbackMessageModel.get_most_recent_message(feedback_thread_id)

        yield (
            reducer_key,
            {
                "type": feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
                "activity_id": last_message.exploration_id,
                "activity_title": exp_models.ExplorationModel.get_by_id(last_message.exploration_id).title,
                "author_id": last_message.author_id,
                "last_updated_ms": utils.get_time_in_millisecs(last_message.created_on),
                "subject": last_message.get_thread_subject(),
            },
        )
def map(item):
    """Mapper that emits notification dicts, keyed by user-and-queue-time,
    for the user's subscribed explorations, collections and feedback
    threads. This variant tracks threads via full thread ids
    (exploration id + thread id combined).
    """
    user_id = item.id
    job_queued_msec = RecentUpdatesMRJobManager._get_job_queued_msec()
    # The reducer key encodes both the user and the time the job was
    # queued, so results from different job runs do not collide.
    reducer_key = '%s@%s' % (user_id, job_queued_msec)

    exploration_ids_list = item.activity_ids
    collection_ids_list = item.collection_ids
    feedback_thread_ids_list = item.feedback_thread_ids

    (most_recent_activity_commits, tracked_exp_models_for_feedback) = (
        RecentUpdatesMRJobManager._get_most_recent_activity_commits(
            exp_models.ExplorationModel, exploration_ids_list,
            'exploration', feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
            feconf.COMMIT_MESSAGE_EXPLORATION_DELETED))

    # Subscribing to an exploration implies subscribing to all of its
    # feedback threads; track them by full thread id.
    for exp_model in tracked_exp_models_for_feedback:
        threads = feedback_services.get_all_threads(exp_model.id, False)
        for thread in threads:
            full_thread_id = (
                feedback_models.FeedbackThreadModel.generate_full_thread_id(
                    exp_model.id, thread['thread_id']))
            if full_thread_id not in feedback_thread_ids_list:
                feedback_thread_ids_list.append(full_thread_id)

    # TODO(bhenning): Implement a solution to having feedback threads for
    # collections.
    most_recent_activity_commits += (
        RecentUpdatesMRJobManager._get_most_recent_activity_commits(
            collection_models.CollectionModel, collection_ids_list,
            'collection', feconf.UPDATE_TYPE_COLLECTION_COMMIT,
            feconf.COMMIT_MESSAGE_COLLECTION_DELETED))[0]

    for recent_activity_commit_dict in most_recent_activity_commits:
        yield (reducer_key, recent_activity_commit_dict)

    for feedback_thread_id in feedback_thread_ids_list:
        # Split the full thread id back into its exploration and thread
        # components to look up the latest message.
        exp_id = feedback_services.get_exp_id_from_full_thread_id(
            feedback_thread_id)
        thread_id = feedback_services.get_thread_id_from_full_thread_id(
            feedback_thread_id)
        last_message = (
            feedback_models.FeedbackMessageModel.get_most_recent_message(
                exp_id, thread_id))

        yield (reducer_key, {
            'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
            'activity_id': last_message.exploration_id,
            'activity_title': exp_models.ExplorationModel.get_by_id(
                last_message.exploration_id).title,
            'author_id': last_message.author_id,
            'last_updated_ms': utils.get_time_in_millisecs(
                last_message.created_on),
            'subject': last_message.get_thread_subject(),
        })
def get_threadlist(exploration_id):
    """Returns a list of dicts, one per feedback thread of the given
    exploration.
    """
    thread_dicts = []
    for thread in feedback_models.FeedbackThreadModel.get_threads(
            exploration_id):
        author_id = thread.original_author_id
        thread_dicts.append({
            'last_updated': utils.get_time_in_millisecs(
                thread.last_updated),
            'original_author_username': (
                user_services.get_username(author_id)
                if author_id else None),
            'state_name': thread.state_name,
            'status': thread.status,
            'subject': thread.subject,
            'summary': thread.summary,
            'thread_id': thread.id,
        })
    return thread_dicts
def _entity_created_before_job_queued(entity):
    """Checks that the given entity was created before the MR job was
    queued.

    Mapper methods may want to use this as a precomputation check,
    especially if the datastore classes being iterated over are
    append-only event logs.
    """
    queued_time_param = context.get().mapreduce_spec.mapper.params[
        MAPPER_PARAM_KEY_QUEUED_TIME_MSECS]
    job_queued_msec = float(queued_time_param)
    created_on_msec = utils.get_time_in_millisecs(entity.created_on)
    return created_on_msec <= job_queued_msec
def to_dict(self):
    """Returns a dict representation of this feedback thread."""
    username = None
    if self.original_author_id:
        username = user_services.get_username(self.original_author_id)
    return {
        'last_updated': utils.get_time_in_millisecs(self.last_updated),
        'original_author_username': username,
        'state_name': self.state_name,
        'status': self.status,
        'subject': self.subject,
        'summary': self.summary,
        'thread_id': self.get_thread_id(),
    }
def _get_message_dict(message_instance):
    """Builds a dict representation of a feedback message instance."""
    if message_instance.author_id:
        author_username = user_services.get_username(
            message_instance.author_id)
    else:
        author_username = None
    return {
        'author_username': author_username,
        'created_on': utils.get_time_in_millisecs(
            message_instance.created_on),
        'exploration_id': message_instance.exploration_id,
        'message_id': message_instance.message_id,
        'text': message_instance.text,
        'updated_status': message_instance.updated_status,
        'updated_subject': message_instance.updated_subject,
    }
def _get_message_dict(message_instance):
    """Builds a dict representation of a feedback message instance."""
    message_dict = {
        "created_on": utils.get_time_in_millisecs(
            message_instance.created_on),
        "exploration_id": message_instance.exploration_id,
        "message_id": message_instance.message_id,
        "text": message_instance.text,
        "updated_status": message_instance.updated_status,
        "updated_subject": message_instance.updated_subject,
    }
    # Anonymous messages (no author id) get a None username.
    author_id = message_instance.author_id
    message_dict["author_username"] = (
        user_services.get_username(author_id) if author_id else None)
    return message_dict
def to_dict(self):
    """Returns a dict representation of this feedback message."""
    author_username = None
    if self.author_id:
        author_username = user_services.get_username(self.author_id)
    return {
        'author_username': author_username,
        'created_on': utils.get_time_in_millisecs(self.created_on),
        'exploration_id': self.exploration_id,
        'message_id': self.message_id,
        'text': self.text,
        'updated_status': self.updated_status,
        'updated_subject': self.updated_subject,
    }
def _get_thread_dict_from_model_instance(thread):
    """Converts a feedback thread model instance into a plain dict."""
    if thread.original_author_id:
        author_username = user_services.get_username(
            thread.original_author_id)
    else:
        author_username = None
    thread_dict = {
        "last_updated": utils.get_time_in_millisecs(thread.last_updated),
        "original_author_username": author_username,
        "state_name": thread.state_name,
        "status": thread.status,
        "subject": thread.subject,
        "summary": thread.summary,
    }
    thread_dict["thread_id"] = get_thread_id_from_full_thread_id(thread.id)
    return thread_dict
def to_dict(self):
    """This omits created_on, user_id and (for now) commit_cmds."""
    commit_dict = {
        'username': self.username,
        'collection_id': self.collection_id,
        'commit_type': self.commit_type,
        'commit_message': self.commit_message,
        'version': self.version,
        'post_commit_status': self.post_commit_status,
        'post_commit_community_owned': self.post_commit_community_owned,
        'post_commit_is_private': self.post_commit_is_private,
    }
    commit_dict['last_updated'] = utils.get_time_in_millisecs(
        self.last_updated)
    return commit_dict
def get_last_seen_notifications_msec(user_id):
    """Returns the last time, in milliseconds since the Epoch, when the
    user checked their notifications in the dashboard page or the
    notifications dropdown.

    If the user has never checked the dashboard page or the notifications
    dropdown, returns None.
    """
    subscriptions_model = user_models.UserSubscriptionsModel.get(
        user_id, strict=False)
    if not subscriptions_model:
        return None
    if not subscriptions_model.last_checked:
        return None
    return utils.get_time_in_millisecs(subscriptions_model.last_checked)
def _get_displayable_collection_summary_dicts(collection_summaries):
    """Gets a summary of collections in human readable form.

    Args:
        collection_summaries: list(CollectionSummary). List of collection
            summary domain object.

    Return:
        list(dict). A list of exploration summary dicts in human readable
            form. Example:

            [ {
                'category': u'A category',
                'community_owned': False,
                'id': 'eid2',
                'language_code': 'en',
                'num_views': 0,
                'objective': u'An objective',
                'status': 'public',
                'tags': [],
                'thumbnail_bg_color': '#a33f40',
                'thumbnail_icon_url': self.get_static_asset_url(
                    '/images/subjects/Lightbulb.svg'),
                'title': u'Exploration 2 Albert title',
            }, ]
    """
    displayable_collection_summaries = []
    for collection_summary in collection_summaries:
        # Only non-private collections (and non-None summaries) are
        # included in the displayable list.
        if collection_summary and collection_summary.status != (
                rights_manager.ACTIVITY_STATUS_PRIVATE):
            displayable_collection_summaries.append({
                'id': collection_summary.id,
                'title': collection_summary.title,
                'category': collection_summary.category,
                'activity_type': constants.ACTIVITY_TYPE_COLLECTION,
                'objective': collection_summary.objective,
                'language_code': collection_summary.language_code,
                'tags': collection_summary.tags,
                'node_count': collection_summary.node_count,
                'last_updated_msec': utils.get_time_in_millisecs(
                    collection_summary.collection_model_last_updated),
                'thumbnail_icon_url': (
                    utils.get_thumbnail_icon_url_for_category(
                        collection_summary.category)),
                'thumbnail_bg_color': utils.get_hex_color_for_category(
                    collection_summary.category)})
    return displayable_collection_summaries
def _get_displayable_collection_summary_dicts(collection_summaries):
    """Gets a summary of collections in human readable form.

    Args:
        collection_summaries: list(CollectionSummary). List of collection
            summary domain object.

    Return:
        list(dict). A list of exploration summary dicts in human readable
            form. Example:

            [ {
                'category': u'A category',
                'community_owned': False,
                'id': 'eid2',
                'language_code': 'en',
                'num_views': 0,
                'objective': u'An objective',
                'status': 'public',
                'tags': [],
                'thumbnail_bg_color': '#a33f40',
                'thumbnail_icon_url': self.get_static_asset_url(
                    '/images/subjects/Lightbulb.svg'),
                'title': u'Exploration 2 Albert title',
            }, ]
    """
    displayable_collection_summaries = []
    for collection_summary in collection_summaries:
        # Only non-private collections (and non-None summaries) are
        # included in the displayable list.
        if collection_summary and collection_summary.status != (
                rights_manager.ACTIVITY_STATUS_PRIVATE):
            displayable_collection_summaries.append({
                'id': collection_summary.id,
                'title': collection_summary.title,
                'category': collection_summary.category,
                # NOTE(review): this variant reads the activity-type
                # constant from feconf, while a sibling version reads it
                # from constants — confirm which module is current.
                'activity_type': feconf.ACTIVITY_TYPE_COLLECTION,
                'objective': collection_summary.objective,
                'language_code': collection_summary.language_code,
                'tags': collection_summary.tags,
                'node_count': collection_summary.node_count,
                'last_updated_msec': utils.get_time_in_millisecs(
                    collection_summary.collection_model_last_updated),
                'thumbnail_icon_url': (
                    utils.get_thumbnail_icon_url_for_category(
                        collection_summary.category)),
                'thumbnail_bg_color': utils.get_hex_color_for_category(
                    collection_summary.category)})
    return displayable_collection_summaries
def map(item):
    """Maps one UserSubscriptionsModel to recent-update dicts for that user.

    Yields (reducer_key, update_dict) pairs: one per recent exploration or
    collection commit, plus one per feedback thread (based on the thread's
    most recent message).
    """
    user_id = item.id
    job_queued_msec = RecentUpdatesMRJobManager._get_job_queued_msec()
    # The reducer key couples the user with the time this job run was
    # queued, so output from different job runs is kept separate.
    reducer_key = '%s@%s' % (user_id, job_queued_msec)

    exploration_ids_list = item.activity_ids
    collection_ids_list = item.collection_ids
    feedback_thread_ids_list = item.feedback_thread_ids

    (most_recent_activity_commits, tracked_exp_models_for_feedback) = (
        RecentUpdatesMRJobManager._get_most_recent_activity_commits(
            exp_models.ExplorationModel, exploration_ids_list,
            'exploration', feconf.UPDATE_TYPE_EXPLORATION_COMMIT,
            feconf.COMMIT_MESSAGE_EXPLORATION_DELETED))

    # Add feedback threads on the tracked explorations that are not
    # already in the user's subscribed-thread list.
    for exp_model in tracked_exp_models_for_feedback:
        threads = feedback_services.get_threadlist(exp_model.id)
        for thread in threads:
            if thread['thread_id'] not in feedback_thread_ids_list:
                feedback_thread_ids_list.append(thread['thread_id'])

    # TODO(bhenning): Implement a solution to having feedback threads for
    # collections.
    most_recent_activity_commits += (
        RecentUpdatesMRJobManager._get_most_recent_activity_commits(
            collection_models.CollectionModel, collection_ids_list,
            'collection', feconf.UPDATE_TYPE_COLLECTION_COMMIT,
            feconf.COMMIT_MESSAGE_COLLECTION_DELETED))[0]

    for recent_activity_commit_dict in most_recent_activity_commits:
        yield (reducer_key, recent_activity_commit_dict)

    # One update per feedback thread, summarizing its latest message.
    for feedback_thread_id in feedback_thread_ids_list:
        last_message = (
            feedback_models.FeedbackMessageModel.get_most_recent_message(
                feedback_thread_id))

        yield (reducer_key, {
            'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
            'activity_id': last_message.exploration_id,
            'activity_title': exp_models.ExplorationModel.get_by_id(
                last_message.exploration_id).title,
            'author_id': last_message.author_id,
            'last_updated_ms': utils.get_time_in_millisecs(
                last_message.created_on),
            'subject': last_message.get_thread_subject(),
        })
def get_statistics(cls, exploration_id, exploration_version):
    """Returns visit statistics for the given exploration version.

    Combines counts aggregated by the batch (MapReduce) job with counts
    recorded since then in the realtime layer.

    Args:
        exploration_id: id of the exploration to get statistics for.
        exploration_version: str. Which version of the exploration to get
            statistics for; this can be a version number, the string
            'all', or the string 'none'.

    Returns:
        dict with the following keys:
            'start_exploration_count': # of times exploration was started.
            'complete_exploration_count': # of times exploration was
                completed.
            'state_hit_counts': dict mapping each state name to a dict
                with keys 'first_entry_count', 'total_entry_count' and
                'no_answer_count'.
            'last_updated': time in msec the batch model was last updated,
                or None if no batch model exists.
    """
    starts = 0
    completions = 0
    state_hit_counts = {}
    last_updated_msec = None

    # Counts from the most recent batch aggregation, if one exists.
    entity_id = stats_models.ExplorationAnnotationsModel.get_entity_id(
        exploration_id, exploration_version)
    batch_model = stats_models.ExplorationAnnotationsModel.get(
        entity_id, strict=False)
    if batch_model is not None:
        starts += batch_model.num_starts
        completions += batch_model.num_completions
        state_hit_counts = batch_model.state_hit_counts
        last_updated_msec = utils.get_time_in_millisecs(
            batch_model.last_updated)

    # Counts recorded since the last batch run.
    realtime_model = cls._get_realtime_datastore_class().get(
        cls.get_active_realtime_layer_id(exploration_id), strict=False)
    if realtime_model is not None:
        starts += realtime_model.num_starts
        completions += realtime_model.num_completions

    return {
        'start_exploration_count': starts,
        'complete_exploration_count': completions,
        'state_hit_counts': state_hit_counts,
        'last_updated': last_updated_msec,
    }
def test_to_dict(self):
    """Checks that FeedbackMessage.to_dict round-trips its fields."""
    creation_time = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
    expected_message_dict = {
        'author_username': self.OWNER_USERNAME,
        'created_on': utils.get_time_in_millisecs(creation_time),
        'exploration_id': self.EXP_ID,
        'message_id': self.MESSAGE_ID,
        'text': 'a message text',
        'updated_status': 'open',
        'updated_subject': 'an updated subject',
    }
    observed_message = feedback_domain.FeedbackMessage(
        self.FULL_MESSAGE_ID, self.FULL_THREAD_ID, self.MESSAGE_ID,
        self.owner_id, expected_message_dict['updated_status'],
        expected_message_dict['updated_subject'],
        expected_message_dict['text'], creation_time, creation_time)
    self.assertDictEqual(
        expected_message_dict, observed_message.to_dict())
def map(item):
    """Yields (committer_id, candidate first-published time in msec) for
    each commit to an exploration that is currently published.

    Commits to private or rights-less explorations are skipped.
    """
    exploration_id = item.get_unversioned_instance_id()
    rights = rights_manager.get_exploration_rights(
        exploration_id, strict=False)
    if rights is None:
        return
    first_published_msec = rights.first_published_msec
    # First contribution time in msec is only set from contributions to
    # explorations that are currently published.
    if rights_manager.is_exploration_private(exploration_id):
        return
    commit_created_msec = utils.get_time_in_millisecs(item.created_on)
    yield (
        item.committer_id,
        max(first_published_msec, commit_created_msec)
    )
def get(self, username):
    """Handles GET requests for a user's public profile data.

    Raises PageNotFoundException when the username is empty or unknown.
    """
    if not username:
        raise self.PageNotFoundException

    user_settings = user_services.get_user_settings_from_username(username)
    if not user_settings:
        raise self.PageNotFoundException

    # Report the first contribution time in msec, or None if the user has
    # not contributed yet.
    first_contribution_msec = None
    if user_settings.first_contribution_datetime:
        first_contribution_msec = utils.get_time_in_millisecs(
            user_settings.first_contribution_datetime)

    self.values.update({
        'user_bio': user_settings.user_bio,
        'first_contribution_datetime': first_contribution_msec,
        'profile_picture_data_url': (
            user_settings.profile_picture_data_url),
    })
    self.render_json(self.values)
def test_to_dict(self):
    """Checks that FeedbackThread.to_dict round-trips its fields."""
    creation_time = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
    expected_thread_dict = {
        'thread_id': self.THREAD_ID,
        'status': u'open',
        'state_name': u'a_state_name',
        'summary': None,
        'original_author_username': self.VIEWER_USERNAME,
        'subject': u'a subject',
        'last_updated': utils.get_time_in_millisecs(creation_time),
    }
    observed_thread = feedback_domain.FeedbackThread(
        self.FULL_THREAD_ID, self.EXP_ID,
        expected_thread_dict['state_name'], self.viewer_id,
        expected_thread_dict['status'], expected_thread_dict['subject'],
        expected_thread_dict['summary'], False, creation_time,
        creation_time)
    self.assertDictEqual(
        expected_thread_dict, observed_thread.to_dict())
def to_dict(self):
    """Returns a dict representation of this feedback message.

    The author username is looked up from the author id; it is None for
    messages with no author id.
    """
    author_username = (
        user_services.get_username(self.author_id)
        if self.author_id else None)
    return {
        'author_username': author_username,
        'created_on': utils.get_time_in_millisecs(self.created_on),
        'exploration_id': self.exploration_id,
        'message_id': self.message_id,
        'text': self.text,
        'updated_status': self.updated_status,
        'updated_subject': self.updated_subject,
    }
def to_dict(self):
    """Returns a dict representation of this FeedbackThread object.

    Returns:
        dict. A dict representation of the FeedbackThread object.
    """
    # Resolve the author's username; threads can be anonymous, in which
    # case the username is None.
    if self.original_author_id:
        original_author_username = user_services.get_username(
            self.original_author_id)
    else:
        original_author_username = None
    return {
        'last_updated': utils.get_time_in_millisecs(self.last_updated),
        'original_author_username': original_author_username,
        'state_name': self.state_name,
        'status': self.status,
        'subject': self.subject,
        'summary': self.summary,
        'thread_id': self.id,
        'message_count': self.message_count,
    }
def setUp(self):
    super(SkillSummaryTests, self).setUp()
    # Use a single timestamp for both created-on and last-updated so the
    # dict and the domain object agree exactly.
    now = datetime.datetime.utcnow()
    now_msec = utils.get_time_in_millisecs(now)
    self.skill_summary_dict = {
        'id': 'skill_id',
        'description': 'description',
        'language_code': 'en',
        'version': 1,
        'misconception_count': 1,
        'worked_examples_count': 1,
        'skill_model_created_on': now_msec,
        'skill_model_last_updated': now_msec,
    }
    self.skill_summary = skill_domain.SkillSummary(
        'skill_id', 'description', 'en', 1, 1, 1, now, now)
def get_threadlist(exploration_id):
    """Returns a list of dicts, one per feedback thread of the given
    exploration.
    """
    thread_dicts = []
    for thread in feedback_models.FeedbackThreadModel.get_threads(
            exploration_id):
        thread_dicts.append({
            'last_updated': utils.get_time_in_millisecs(
                thread.last_updated),
            'original_author_username': (
                user_services.get_username(thread.original_author_id)
                if thread.original_author_id else None),
            'state_name': thread.state_name,
            'status': thread.status,
            'subject': thread.subject,
            'summary': thread.summary,
            'thread_id': thread.id,
        })
    return thread_dicts
def _get_message_dict(message_instance):
    """Builds a dict describing the given feedback message instance."""
    # Anonymous messages have no author id and therefore no username.
    if message_instance.author_id:
        author_username = user_services.get_username(
            message_instance.author_id)
    else:
        author_username = None
    return {
        'author_username': author_username,
        'created_on': utils.get_time_in_millisecs(
            message_instance.created_on),
        'exploration_id': message_instance.exploration_id,
        'message_id': message_instance.message_id,
        'text': message_instance.text,
        'updated_status': message_instance.updated_status,
        'updated_subject': message_instance.updated_subject,
    }
def _get_thread_dict_from_model_instance(thread):
    """Builds a dict describing the given feedback thread model."""
    author_username = (
        user_services.get_username(thread.original_author_id)
        if thread.original_author_id else None)
    return {
        'last_updated': utils.get_time_in_millisecs(thread.last_updated),
        'original_author_username': author_username,
        'state_name': thread.state_name,
        'status': thread.status,
        'subject': thread.subject,
        'summary': thread.summary,
        # The model id is a full thread id; expose only the thread part.
        'thread_id': get_thread_id_from_full_thread_id(thread.id),
    }
def setUp(self):
    super(StorySummaryTests, self).setUp()
    # One shared timestamp keeps the expected dict and the domain object
    # consistent.
    now = datetime.datetime.utcnow()
    now_msec = utils.get_time_in_millisecs(now)
    self.story_summary_dict = {
        'id': 'story_id',
        'title': 'title',
        'description': 'description',
        'language_code': 'en',
        'version': 1,
        'node_count': 10,
        'story_model_created_on': now_msec,
        'story_model_last_updated': now_msec,
    }
    self.story_summary = story_domain.StorySummary(
        'story_id', 'title', 'description', 'en', 1, 10, now, now)
def to_dict(self):
    """Returns a dict representation of this FeedbackMessage object.

    Returns:
        dict. Dict representation of the FeedbackMessage object.
    """
    if self.author_id:
        author_username = user_services.get_username(self.author_id)
    else:
        author_username = None
    return {
        'author_username': author_username,
        'created_on': utils.get_time_in_millisecs(self.created_on),
        'exploration_id': self.exploration_id,
        'message_id': self.message_id,
        'text': self.text,
        'updated_status': self.updated_status,
        'updated_subject': self.updated_subject,
        'received_via_email': self.received_via_email,
    }
def to_dict(self):
    """Returns a dict describing this feedback thread."""
    author_username = (
        user_services.get_username(self.original_author_id)
        if self.original_author_id else None)
    return {
        'last_updated': utils.get_time_in_millisecs(self.last_updated),
        'original_author_username': author_username,
        'state_name': self.state_name,
        'status': self.status,
        'subject': self.subject,
        'summary': self.summary,
        'thread_id': self.get_thread_id(),
    }
def _get_explorations_summary_dict(
        exploration_rights, include_timestamps=False):
    """Returns exploration summaries corresponding to the given rights
    objects.

    The summary is a dict that is keyed by exploration id. Each value is
    a dict with the following keys: title, category and rights. The value
    for 'rights' is the rights object, represented as a dict. When
    include_timestamps is True, each value also gets a 'last_updated' key
    holding the exploration's last-updated time in milliseconds.
    """
    id_to_rights = {rights.id: rights for rights in exploration_rights}
    exp_ids = [rights.id for rights in exploration_rights]

    # Batch-fetch whatever explorations are already cached in memcache.
    memcached_keys = [
        _get_exploration_memcache_key(exp_id) for exp_id in exp_ids]
    memcached_explorations = memcache_services.get_multi(memcached_keys)

    # Collect the ids of explorations missing from memcache, and fetch
    # those from the datastore in a single batch call.
    uncached_exp_ids = []
    for ind, key in enumerate(memcached_keys):
        if key not in memcached_explorations:
            uncached_exp_ids.append(exp_ids[ind])
    exploration_models = exp_models.ExplorationModel.get_multi(
        uncached_exp_ids)

    # Convert the uncached models to domain objects and write them back to
    # memcache. A missing datastore model is logged but not fatal.
    exps_to_cache = {}
    for ind, model in enumerate(exploration_models):
        if model:
            exps_to_cache[_get_exploration_memcache_key(model.id)] = (
                get_exploration_from_model(model))
        else:
            logging.error(
                'Could not find exploration %s' % uncached_exp_ids[ind])
    memcache_services.set_multi(exps_to_cache)

    result = {}
    # NOTE: concatenating dict.values() relies on Python 2 semantics,
    # where values() returns a list.
    for exploration in (
            memcached_explorations.values() + exps_to_cache.values()):
        result[exploration.id] = {
            'title': exploration.title,
            'category': exploration.category,
            'rights': id_to_rights[exploration.id].to_dict(),
        }
        if include_timestamps:
            result[exploration.id]['last_updated'] = (
                utils.get_time_in_millisecs(
                    exploration.last_updated))
    return result
def test_to_dict(self):
    """Checks that FeedbackMessage.to_dict round-trips its fields,
    including entity_type and received_via_email.
    """
    creation_time = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
    expected_message_dict = {
        'author_username': self.OWNER_USERNAME,
        'created_on': utils.get_time_in_millisecs(creation_time),
        'entity_type': feconf.ENTITY_TYPE_EXPLORATION,
        'entity_id': self.EXP_ID,
        'message_id': self.MESSAGE_ID,
        'text': 'a message text',
        'updated_status': 'open',
        'updated_subject': 'an updated subject',
        'received_via_email': False,
    }
    observed_message = feedback_domain.FeedbackMessage(
        self.FULL_MESSAGE_ID, self.THREAD_ID, self.MESSAGE_ID,
        self.owner_id, expected_message_dict['updated_status'],
        expected_message_dict['updated_subject'],
        expected_message_dict['text'], creation_time, creation_time,
        False)
    self.assertDictEqual(
        expected_message_dict, observed_message.to_dict())
def test_export_data_nontrivial(self):
    """Checks that thread export data matches the stored model fields."""
    exported_data = (
        feedback_models
        .GeneralFeedbackThreadModel.export_data(self.USER_ID))
    feedback_id = '%s.%s.%s' % (self.ENTITY_TYPE, self.ENTITY_ID, 'random')
    expected_data = {
        feedback_id: {
            'entity_type': self.ENTITY_TYPE,
            'entity_id': self.ENTITY_ID,
            'status': self.STATUS,
            'subject': self.SUBJECT,
            'has_suggestion': self.HAS_SUGGESTION,
            'summary': self.SUMMARY,
            'message_count': self.MESSAGE_COUNT,
            'last_updated_msec': utils.get_time_in_millisecs(
                self.feedback_thread_model.last_updated),
        }
    }
    self.assertEqual(exported_data, expected_data)
def to_dict(self):
    """Returns a dict representation of a suggestion object.

    Returns:
        dict. A dict representation of a suggestion object.
    """
    suggestion_dict = {
        'suggestion_id': self.suggestion_id,
        'suggestion_type': self.suggestion_type,
        'target_type': self.target_type,
        'target_id': self.target_id,
        'target_version_at_submission': self.target_version_at_submission,
        'status': self.status,
        'final_reviewer_id': self.final_reviewer_id,
        'score_category': self.score_category,
    }
    # Derived fields: author name is resolved from the stored id, the
    # change is serialized, and the timestamp is converted to msec.
    suggestion_dict['author_name'] = self.get_author_name()
    suggestion_dict['change'] = self.change.to_dict()
    suggestion_dict['last_updated'] = utils.get_time_in_millisecs(
        self.last_updated)
    return suggestion_dict
def entity_created_before_job_queued(entity):
    """Checks that the given entity was created before the MR job was
    queued.

    Mapper methods may want to use this as a precomputation check,
    especially if the datastore classes being iterated over are
    append-only event logs.

    Args:
        entity: BaseModel. An entity this job type is responsible for
            handling.

    Returns:
        bool. Whether the entity was queued before the job was created.
    """
    entity_created_msec = utils.get_time_in_millisecs(entity.created_on)
    # The job's queue time is stashed in the mapper params by the runner.
    queued_msec = float(
        context.get().mapreduce_spec.mapper.params[
            MAPPER_PARAM_KEY_QUEUED_TIME_MSECS])
    return queued_msec >= entity_created_msec
def to_dict(self):
    """Returns a dict representing this CollectionCommitLogEntry domain
    object. This omits created_on, user_id and (for now) commit_cmds.

    Returns:
        A dict, mapping all fields of CollectionCommitLogEntry instance,
        except created_on, user_id and (for now) commit_cmds field.
    """
    commit_dict = {
        'username': self.username,
        'collection_id': self.collection_id,
        'commit_type': self.commit_type,
        'commit_message': self.commit_message,
        'version': self.version,
        'post_commit_status': self.post_commit_status,
        'post_commit_community_owned': self.post_commit_community_owned,
        'post_commit_is_private': self.post_commit_is_private,
    }
    commit_dict['last_updated'] = utils.get_time_in_millisecs(
        self.last_updated)
    return commit_dict
def test_first_published_time_of_exploration_that_is_unpublished(self):
    """This tests that, if an exploration is published, unpublished, and
    then published again, the job uses the first publication time as the
    value for first_published_msec.
    """
    self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
    admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
    self.set_admins([self.ADMIN_USERNAME])
    self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
    owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
    owner = user_services.UserActionsInfo(owner_id)
    admin = user_services.UserActionsInfo(admin_id)

    # Create and publish an exploration, then run the one-off job.
    self.save_new_valid_exploration(
        self.EXP_ID, owner_id, end_state_name='End')
    rights_manager.publish_exploration(owner, self.EXP_ID)
    job_class = exp_jobs_one_off.ExplorationFirstPublishedOneOffJob
    job_id = job_class.create_new()
    exp_jobs_one_off.ExplorationFirstPublishedOneOffJob.enqueue(job_id)
    self.process_and_flush_pending_tasks()
    exploration_rights = rights_manager.get_exploration_rights(self.EXP_ID)

    # Test to see whether first_published_msec was correctly updated.
    exp_first_published = exploration_rights.first_published_msec
    exp_rights_model = exp_models.ExplorationRightsModel.get(self.EXP_ID)
    last_updated_time_msec = utils.get_time_in_millisecs(
        exp_rights_model.last_updated)
    self.assertLess(exp_first_published, last_updated_time_msec)

    # Unpublish and republish the exploration, then rerun the job.
    rights_manager.unpublish_exploration(admin, self.EXP_ID)
    rights_manager.publish_exploration(owner, self.EXP_ID)
    job_id = job_class.create_new()
    exp_jobs_one_off.ExplorationFirstPublishedOneOffJob.enqueue(job_id)
    self.process_and_flush_pending_tasks()

    # Test to see whether first_published_msec remains the same despite the
    # republication.
    exploration_rights = rights_manager.get_exploration_rights(self.EXP_ID)
    self.assertEqual(
        exp_first_published, exploration_rights.first_published_msec)
def to_dict(self):
    """Returns dict representation of the FeedbackThreadSummary object.

    Returns:
        dict. Dict representation of the FeedbackThreadSummary object.
    """
    summary_dict = {
        'status': self.status,
        'original_author_id': self.original_author_id,
        'last_message_text': self.last_message_text,
        'total_message_count': self.total_message_count,
        'last_message_is_read': self.last_message_is_read,
        'second_last_message_is_read': self.second_last_message_is_read,
        'author_last_message': self.author_last_message,
        'author_second_last_message': self.author_second_last_message,
        'exploration_title': self.exploration_title,
        'exploration_id': self.exploration_id,
        'thread_id': self.thread_id,
    }
    summary_dict['last_updated'] = utils.get_time_in_millisecs(
        self.last_updated)
    return summary_dict
def test_export_data_nontrivial(self):
    """Checks that blog post export data matches the stored model fields.

    Note: the previous version of this test had stray trailing commas
    after both assignments, which wrapped each side of the comparison in
    a one-element tuple. The assertion only passed by comparing
    tuple-to-tuple; the commas are removed so the dicts are compared
    directly.
    """
    user_data = blog_models.BlogPostModel.export_data(self.USER_ID)
    blog_post_id = 'blog_one'
    test_data = {
        blog_post_id: {
            'content': self.CONTENT,
            'title': self.TITLE,
            'published_on': utils.get_time_in_millisecs(
                self.blog_post_model.published_on),
            'url_fragment': 'sample-url-fragment',
            'tags': self.TAGS,
            'thumbnail_filename': self.THUMBNAIL,
        }
    }
    self.assertEqual(user_data, test_data)
def test_contribution_datetime(self):
    # Before any contribution is recorded, the handler should report no
    # first-contribution time.
    self.signup(self.EMAIL, self.USERNAME)
    self.login(self.EMAIL)
    self.user_id = self.get_user_id_from_email(self.EMAIL)
    response_dict = self.get_json(
        '/profilehandler/data/%s' % self.USERNAME)
    self.assertEqual(response_dict['first_contribution_datetime'], None)

    # Record a first contribution at the current time.
    current_datetime = datetime.datetime.utcnow()
    user_services.update_first_contribution_datetime(
        self.user_id, current_datetime)

    # The handler should now return that time, converted to milliseconds.
    response_dict = self.get_json(
        '/profilehandler/data/%s' % self.USERNAME)
    self.assertEqual(
        response_dict['first_contribution_datetime'],
        utils.get_time_in_millisecs(current_datetime))
def setUp(self):
    super(SuggestionEditStateContentUnitTests, self).setUp()
    # Register the three users involved in the suggestion lifecycle.
    self.signup(self.AUTHOR_EMAIL, 'author')
    self.author_id = self.get_user_id_from_email(self.AUTHOR_EMAIL)
    self.signup(self.REVIEWER_EMAIL, 'reviewer')
    self.reviewer_id = self.get_user_id_from_email(self.REVIEWER_EMAIL)
    self.signup(self.ASSIGNED_REVIEWER_EMAIL, 'assignedReviewer')
    self.assigned_reviewer_id = self.get_user_id_from_email(
        self.ASSIGNED_REVIEWER_EMAIL)

    change_cmd = {
        'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
        'property_name': exp_domain.STATE_PROPERTY_CONTENT,
        'state_name': 'state_1',
        'new_value': 'new suggestion content',
        'old_value': None,
    }
    self.suggestion_dict = {
        'suggestion_id': 'exploration.exp1.thread1',
        'suggestion_type': (
            suggestion_models.SUGGESTION_TYPE_EDIT_STATE_CONTENT),
        'target_type': suggestion_models.TARGET_TYPE_EXPLORATION,
        'target_id': 'exp1',
        'target_version_at_submission': 1,
        'status': suggestion_models.STATUS_ACCEPTED,
        'author_name': 'author',
        'final_reviewer_id': self.reviewer_id,
        'assigned_reviewer_id': self.assigned_reviewer_id,
        'change_cmd': change_cmd,
        'score_category': 'content.Algebra',
        'last_updated': utils.get_time_in_millisecs(self.fake_date),
    }
def test_to_dict(self):
    """Checks that FeedbackThread.to_dict round-trips its fields,
    including the last nonempty message metadata.
    """
    creation_time = datetime.datetime(2016, 4, 10, 0, 0, 0, 0)
    expected_thread_dict = {
        'thread_id': self.THREAD_ID,
        'status': u'open',
        'state_name': u'a_state_name',
        'summary': None,
        'original_author_username': self.VIEWER_USERNAME,
        'message_count': 1,
        'subject': u'a subject',
        'last_updated': utils.get_time_in_millisecs(creation_time),
        'last_nonempty_message_text': 'last message',
        'last_nonempty_message_author': self.VIEWER_USERNAME,
    }
    observed_thread = feedback_domain.FeedbackThread(
        self.THREAD_ID, feconf.ENTITY_TYPE_EXPLORATION, self.EXP_ID,
        expected_thread_dict['state_name'], self.viewer_id,
        expected_thread_dict['status'], expected_thread_dict['subject'],
        expected_thread_dict['summary'], False, 1, creation_time,
        creation_time, 'last message', self.viewer_id)
    self.assertDictEqual(
        expected_thread_dict, observed_thread.to_dict())
def setUp(self):
    super(TopicSummaryTests, self).setUp()
    # A single shared timestamp keeps the dict and domain object aligned.
    now = datetime.datetime.utcnow()
    now_msec = utils.get_time_in_millisecs(now)
    self.topic_summary_dict = {
        'id': 'topic_id',
        'name': 'name',
        'language_code': 'en',
        'version': 1,
        'canonical_story_count': 1,
        'additional_story_count': 1,
        'uncategorized_skill_count': 1,
        'subtopic_count': 1,
        'total_skill_count': 1,
        'topic_model_created_on': now_msec,
        'topic_model_last_updated': now_msec,
    }
    self.topic_summary = topic_domain.TopicSummary(
        'topic_id', 'name', 'name', 'en', 1, 1, 1, 1, 1, 1, now, now)
def get_snapshots_metadata(cls, model_instance_id, version_numbers,
                           allow_deleted=False):
    """Returns a list of dicts, each representing a model snapshot.

    One dict is returned for each version number in the list of version
    numbers requested. If any of the version numbers does not exist, an
    error is raised. If `allow_deleted` is False, an error is raised if
    the current model has been deleted.
    """
    if not allow_deleted:
        cls.get(model_instance_id)._require_not_marked_deleted()

    snapshot_ids = [
        cls._get_snapshot_id(model_instance_id, version_number)
        for version_number in version_numbers]
    metadata_keys = [
        ndb.Key(cls.SNAPSHOT_METADATA_CLASS, snapshot_id)
        for snapshot_id in snapshot_ids]
    returned_models = ndb.get_multi(metadata_keys)

    # Any missing snapshot means a requested version number is invalid.
    for index, metadata_model in enumerate(returned_models):
        if metadata_model is None:
            raise Exception(
                'Invalid version number %s for model %s with id %s'
                % (version_numbers[index], cls.__name__,
                   model_instance_id))

    return [{
        'committer_id': metadata_model.committer_id,
        'commit_message': metadata_model.commit_message,
        'commit_cmds': metadata_model.commit_cmds,
        'commit_type': metadata_model.commit_type,
        'version_number': version_numbers[index],
        'created_on_ms': utils.get_time_in_millisecs(
            metadata_model.created_on),
    } for (index, metadata_model) in enumerate(returned_models)]