def test_parse_examlog(self, save_notifications):
    """An open exam log emits nothing; closing it yields a Quiz/Completed notification."""
    log = ExamLog.objects.create(exam=self.exam, user=self.user1)

    # While the exam is still open, parsing must not emit anything.
    parse_examlog(log, local_now())
    assert save_notifications.called is False

    log.closed = True
    parse_examlog(log, local_now())
    assert save_notifications.called

    saved = save_notifications.call_args[0][0][0]
    assert saved.notification_object == NotificationObjectType.Quiz
    assert saved.notification_event == NotificationEventType.Completed
def test_schedule_a_function_gives_value_error_repeat_zero_interval(
    self, scheduler
):
    """Scheduling a repeating job with interval=0 must raise a ValueError."""
    with pytest.raises(ValueError) as excinfo:
        scheduler.schedule(local_now(), id, interval=0, repeat=None)
    assert "specify an interval" in str(excinfo.value)
def test_create_examlog(self, save_notifications):
    """Creating an exam log emits a Quiz/Started notification."""
    log = ExamLog.objects.create(exam=self.exam, user=self.user1)
    create_examlog(log, local_now())

    assert save_notifications.called
    saved = save_notifications.call_args[0][0][0]
    assert saved.notification_object == NotificationObjectType.Quiz
    assert saved.notification_event == NotificationEventType.Started
def update_log(cls, user, user_agent):
    """
    Update the current UserSessionLog for a particular user.

    A new session log is started when there is no previous one, or when the
    last interaction was more than five minutes ago; otherwise the latest
    session log just gets its interaction timestamp refreshed.

    ua_parser never defaults the setting of os.family and user_agent.family
    It uses the value 'other' whenever the values are not recognized or the
    parsing fails. The code depends on this behaviour.
    """
    # Only log sessions for real facility users (ignores anonymous users).
    if user and isinstance(user, FacilityUser):
        try:
            user_session_log = cls.objects.filter(
                user=user).latest("last_interaction_timestamp")
        except ObjectDoesNotExist:
            user_session_log = None
        # Stale or missing session: start a new log, capturing device info
        # parsed from the raw user-agent string.
        if (not user_session_log
                or timezone.now() - user_session_log.last_interaction_timestamp
                > timedelta(minutes=5)):
            parsed_string = user_agent_parser.Parse(user_agent)
            # Compact "os,major/browser,major" summary, e.g. "Mac OS X,10/Chrome,80".
            device_info = (
                "{os_family},{os_major}/{browser_family},{browser_major}".
                format(
                    os_family=parsed_string["os"].get("family", ""),
                    os_major=parsed_string["os"].get("major", ""),
                    browser_family=parsed_string["user_agent"].get(
                        "family", ""),
                    browser_major=parsed_string["user_agent"].get(
                        "major", ""),
                ))
            user_session_log = cls(user=user, device_info=device_info)
        user_session_log.last_interaction_timestamp = local_now()
        user_session_log.save()
def perform_ping(started, server=DEFAULT_SERVER_URL):
    """
    Send a single telemetry pingback to the stats server and return its
    decoded JSON response.

    :param started: datetime of server start, used to report uptime.
    :param server: base URL of the pingback server.
    :raises requests.HTTPError: if the server responds with an error status.
    """
    url = urljoin(server, "/api/v1/pingback")
    instance, _ = InstanceIDModel.get_or_create_current_instance()
    language = get_device_setting("language_id", "")
    try:
        timezone = get_current_timezone().zone
    except Exception:
        # Best effort: report an empty timezone rather than failing the ping.
        timezone = ""
    data = {
        "instance_id": instance.id,
        "version": kolibri.__version__,
        "mode": conf.OPTIONS["Deployment"]["RUN_MODE"],
        "platform": instance.platform,
        "sysversion": instance.sysversion,
        "database_id": instance.database.id,
        "system_id": instance.system_id,
        "node_id": instance.node_id,
        "language": language,
        "timezone": timezone,
        # Uptime reported in whole minutes since `started`.
        "uptime": int((local_now() - started).total_seconds() / 60),
        "timestamp": localtime(),
        "installer": installation_type(),
    }
    logger.debug("Pingback data: {}".format(data))
    jsondata = dump_zipped_json(data)
    response = requests.post(url, data=jsondata, timeout=60)
    response.raise_for_status()
    # The server may return an empty body; treat that as an empty JSON object.
    return json.loads(response.content.decode() or "{}")
def test_cancel_removes_job(self, scheduler):
    """A cancelled job can no longer be fetched from the scheduler."""
    cancelled_id = scheduler.enqueue_at(local_now(), id)
    scheduler.cancel(cancelled_id)
    with pytest.raises(JobNotFound):
        scheduler.get_job(cancelled_id)
def test_scheduled_repeating_function_sets_endless_repeat_new_job(self, scheduler):
    """A job scheduled with repeat=None is still endlessly repeating after rescheduling."""
    scheduler.schedule(local_now(), id, interval=1000, repeat=None)
    scheduler.check_schedule()
    with scheduler.session_scope() as session:
        rescheduled = scheduler._ns_query(session).one_or_none()
        assert rescheduled.repeat is None
def update(self, instance, validated_data):
    """Update the exam log, stamping completion time when it transitions to closed."""
    # This has changed, set the completion timestamp
    if validated_data.get("closed") and not instance.closed:
        instance.completion_timestamp = now()
    instance = super(ExamLogSerializer, self).update(instance, validated_data)
    # to check if a notification must be created:
    # NOTE(review): the completion timestamp uses now() while the queued
    # notification check uses local_now() — confirm the mismatch is intended.
    wrap_to_save_queue(parse_examlog, instance, local_now())
    return instance
def schedule_vacuum():
    """Schedule the database vacuum job for the next 3 AM, repeating every 24 hours."""
    now = local_now()
    next_run = now.replace(hour=3, minute=0, second=0, microsecond=0)
    # 3 AM already passed today, so run tomorrow instead.
    if next_run < now:
        next_run += timedelta(days=1)
    # repeat=None means repeat indefinitely.
    scheduler.schedule(next_run, perform_vacuum, repeat=None, interval=24 * 60 * 60)
def test_schedule_a_function_sets_time(self, scheduler):
    """The persisted job's scheduled_time matches the requested time (as naive UTC)."""
    requested = local_now()
    job_id = scheduler.schedule(requested, id)
    with scheduler.session_scope() as session:
        row = scheduler._ns_query(session).filter_by(id=job_id).one_or_none()
        assert row.scheduled_time == naive_utc_datetime(requested)
def test_scheduled_repeating_function_sets_new_job_with_one_fewer_repeats(
        self, scheduler):
    """Each reschedule of a finitely-repeating job decrements its repeat count."""
    scheduler.schedule(local_now(), id, interval=1000, repeat=1)
    scheduler.check_schedule()
    with scheduler.session_scope() as session:
        rescheduled = scheduler._ns_query(session).one_or_none()
        assert rescheduled.repeat == 0
def test_scheduled_repeating_function_sets_new_job_at_interval(self, scheduler):
    """Rescheduling a repeating job pushes its time forward by exactly one interval."""
    frozen_now = local_now()
    scheduler.schedule(frozen_now, id, interval=1000, repeat=1)
    scheduler._now = lambda: frozen_now  # pin the scheduler clock
    scheduler.check_schedule()
    with scheduler.session_scope() as session:
        row = scheduler._ns_query(session).one_or_none()
        expected = naive_utc_datetime(frozen_now) + datetime.timedelta(seconds=1000)
        assert row.scheduled_time == expected
def test_enqueue_in_a_function_sets_time(self, scheduler):
    """enqueue_in schedules the job at now + the given timedelta."""
    delay = datetime.timedelta(seconds=1000)
    frozen_now = local_now()
    scheduler._now = lambda: frozen_now  # pin the scheduler clock
    job_id = scheduler.enqueue_in(delay, id)
    with scheduler.session_scope() as session:
        row = scheduler._ns_query(session).filter_by(id=job_id).one_or_none()
        assert row.scheduled_time == naive_utc_datetime(frozen_now) + delay
def schedule_ping(
    server=DEFAULT_SERVER_URL,
    checkrate=DEFAULT_PING_CHECKRATE,
    interval=DEFAULT_PING_INTERVAL,
):
    """Start the recurring analytics pingback, unless disabled by configuration."""
    # Pinging can be disabled through the deployment options.
    if conf.OPTIONS["Deployment"]["DISABLE_PING"]:
        return
    started = local_now()
    _ping.enqueue_at(
        started,
        interval=interval * 60,
        repeat=None,
        kwargs=dict(started=started, server=server, checkrate=checkrate),
    )
def test_parse_attemptslog_create_on_new_attempt_with_no_notification(
    self, save_notifications, create_notification
):
    """
    Parsing the first attempt on an exercise (with no prior notifications)
    emits both a Resource/Started and a Lesson/Started notification.
    """
    log = ContentSessionLogFactory(
        user=self.user1, content_id=uuid.uuid4().hex, channel_id=uuid.uuid4().hex
    )
    now = local_now()
    masterylog = MasteryLog.objects.create(
        summarylog=self.summarylog1,
        user=self.user1,
        start_timestamp=now,
        mastery_level=1,
        complete=True,
    )
    interactions = [{"type": "answer", "correct": 0} for _ in range(3)]
    # Single attempt containing just the first interaction.
    attemptlog1 = AttemptLog.objects.create(
        masterylog=masterylog,
        sessionlog=log,
        user=self.user1,
        start_timestamp=now,
        end_timestamp=now,
        time_spent=1.0,
        complete=True,
        correct=1,
        hinted=False,
        error=False,
        interaction_history=[interactions[0]],
    )
    parse_attemptslog(attemptlog1)
    assert save_notifications.called
    # A "resource started" notification for the content node…
    create_notification.assert_any_call(
        NotificationObjectType.Resource,
        NotificationEventType.Started,
        attemptlog1.user_id,
        self.classroom.id,
        assignment_collections=[self.classroom.id],
        lesson_id=self.lesson_id,
        contentnode_id=self.node_1.id,
        timestamp=attemptlog1.start_timestamp,
    )
    # …and a "lesson started" notification for the enclosing lesson.
    create_notification.assert_any_call(
        NotificationObjectType.Lesson,
        NotificationEventType.Started,
        attemptlog1.user_id,
        self.classroom.id,
        assignment_collections=[self.classroom.id],
        lesson_id=self.lesson_id,
        timestamp=attemptlog1.start_timestamp,
    )
def get(self, request, format=None):
    """
    Return information about this Kolibri server: version, reachable URLs,
    database location, device id, OS, free content storage space, server
    time/timezone, installer type and Python version.
    """
    info = {}
    info["version"] = kolibri.__version__
    status, urls = get_urls()
    if not urls:
        # Will not return anything when running the debug server, so at least return the current URL
        urls = [
            request.build_absolute_uri(OPTIONS["Deployment"]["URL_PATH_PREFIX"])
        ]
    filtered_urls = [
        url for url in urls if "127.0.0.1" not in url and "localhost" not in url
    ]
    if filtered_urls:
        # Prefer externally reachable URLs over loopback-only ones.
        urls = filtered_urls
    info["urls"] = urls
    db_engine = settings.DATABASES["default"]["ENGINE"]
    if db_engine.endswith("sqlite3"):
        # Return path to .sqlite file (usually in KOLIBRI_HOME folder)
        info["database_path"] = settings.DATABASES["default"]["NAME"]
    elif db_engine.endswith("postgresql"):
        info["database_path"] = "postgresql"
    else:
        info["database_path"] = "unknown"
    instance_model = InstanceIDModel.get_or_create_current_instance()[0]
    info["device_id"] = instance_model.id
    info["os"] = instance_model.platform
    info["content_storage_free_space"] = get_free_space(
        OPTIONS["Paths"]["CONTENT_DIR"]
    )
    # This returns the localized time for the server
    info["server_time"] = local_now()
    # Returns the named timezone for the server (the time above only includes the offset)
    info["server_timezone"] = settings.TIME_ZONE
    info["installer"] = installation_type()
    info["python_version"] = "{major}.{minor}.{micro}".format(
        major=version_info.major, minor=version_info.minor, micro=version_info.micro
    )
    return Response(info)
def schedule_ping(
    server=DEFAULT_SERVER_URL,
    checkrate=DEFAULT_PING_CHECKRATE,
    interval=DEFAULT_PING_INTERVAL,
):
    """Schedule the analytics pingback to start now and repeat indefinitely."""
    start_time = local_now()
    scheduler.schedule(
        start_time,
        _ping,
        interval=interval * 60,
        repeat=None,
        started=start_time,
        server=server,
        checkrate=checkrate,
    )
def handle(self, *args, **options):
    """Run the pingback once (--once) or schedule it to repeat on an interval."""
    interval = float(options.get("interval") or DEFAULT_PING_INTERVAL)
    checkrate = float(options.get("checkrate") or DEFAULT_PING_CHECKRATE)
    server = options.get("server") or DEFAULT_SERVER_URL
    run_once = options.get("once") or False
    if not run_once:
        schedule_ping(server, checkrate, interval)
        return
    started = local_now()
    try:
        ping_once(started, server)
    except Exception as e:
        # Surface any failure as a management-command error.
        raise CommandError(e)
def test_scheduled_jobs_persist_on_restart(
    self,
    register_zeroconf_service,
    unregister_zeroconf_service,
    initialize_workers,
    scheduler,
):
    """
    User-scheduled jobs and the built-in pingback/vacuum jobs must all
    survive a stop/start cycle of the services plugin.
    """
    with mock.patch("kolibri.core.tasks.main.scheduler", wraps=scheduler):
        # Don't start scheduler in real, otherwise we may end up in infinite thread loop
        scheduler.start_scheduler = mock.MagicMock(name="start_scheduler")
        # Schedule two userdefined jobs
        from kolibri.utils.time_utils import local_now
        from datetime import timedelta

        schedule_time = local_now() + timedelta(hours=1)
        scheduler.schedule(schedule_time, id, job_id="test01")
        scheduler.schedule(schedule_time, id, job_id="test02")
        # Now, start services plugin
        service_plugin = server.ServicesPlugin(mock.MagicMock(name="bus"), 1234)
        service_plugin.START()
        # Currently, we must have exactly four scheduled jobs
        # two userdefined and two server defined (pingback and vacuum)
        from kolibri.core.analytics.utils import DEFAULT_PING_JOB_ID
        from kolibri.core.deviceadmin.utils import SCH_VACUUM_JOB_ID

        assert scheduler.count() == 4
        assert scheduler.get_job("test01") is not None
        assert scheduler.get_job("test02") is not None
        assert scheduler.get_job(DEFAULT_PING_JOB_ID) is not None
        assert scheduler.get_job(SCH_VACUUM_JOB_ID) is not None
        # Restart services
        service_plugin.STOP()
        service_plugin.START()
        # Make sure all scheduled jobs persist after restart
        assert scheduler.count() == 4
        assert scheduler.get_job("test01") is not None
        assert scheduler.get_job("test02") is not None
        assert scheduler.get_job(DEFAULT_PING_JOB_ID) is not None
        assert scheduler.get_job(SCH_VACUUM_JOB_ID) is not None
def update_log(cls, user):
    """
    Update the current UserSessionLog for a particular user.

    Starts a fresh session log when there is none, or when the previous
    interaction happened more than five minutes ago; otherwise refreshes
    the latest log's interaction timestamp.
    """
    # Only track sessions for real facility users.
    if not (user and isinstance(user, FacilityUser)):
        return
    try:
        session_log = cls.objects.filter(user=user).latest(
            "last_interaction_timestamp"
        )
    except ObjectDoesNotExist:
        session_log = None
    stale = (
        session_log is None
        or timezone.now() - session_log.last_interaction_timestamp
        > timedelta(minutes=5)
    )
    if stale:
        session_log = cls(user=user)
    session_log.last_interaction_timestamp = local_now()
    session_log.save()
def get(self, request, format=None):
    """
    Return information about this Kolibri server: version, reachable URLs,
    database path (SQLite only), device name/id, OS, free content storage
    space, server time/timezone and installer type.
    """
    info = {}
    info["version"] = kolibri.__version__
    status, urls = get_urls()
    if not urls:
        # Will not return anything when running the debug server, so at least return the current URL
        urls = [
            request.build_absolute_uri(
                OPTIONS["Deployment"]["URL_PATH_PREFIX"])
        ]
    filtered_urls = [
        url for url in urls if "127.0.0.1" not in url and "localhost" not in url
    ]
    if filtered_urls:
        # Prefer externally reachable URLs over loopback-only ones.
        urls = filtered_urls
    info["urls"] = urls
    if settings.DATABASES["default"]["ENGINE"].endswith("sqlite3"):
        # If any other database backend, will not be file backed, so no database path to return
        info["database_path"] = settings.DATABASES["default"]["NAME"]
    instance_model = InstanceIDModel.get_or_create_current_instance()[0]
    info["device_name"] = instance_model.hostname
    info["device_id"] = instance_model.id
    info["os"] = instance_model.platform
    info["content_storage_free_space"] = get_free_space(
        OPTIONS["Paths"]["CONTENT_DIR"])
    # This returns the localized time for the server
    info["server_time"] = local_now()
    # Returns the named timezone for the server (the time above only includes the offset)
    info["server_timezone"] = settings.TIME_ZONE
    info["installer"] = installation_type()
    return Response(info)
def import_channel_from_local_db(channel_id, cancel_check=None):
    """
    Import a channel's data from a local content database into the main DB.

    After the import, content metadata is updated and the channel's
    last_updated timestamp is refreshed. If the channel ends up without a
    root node, a placeholder root is created so the channel stays navigable.

    :param channel_id: id of the channel to import.
    :param cancel_check: optional callable polled by the import manager to
        support cancellation.
    """
    import_manager = initialize_import_manager(channel_id, cancel_check=cancel_check)
    import_manager.import_channel_data()
    import_manager.end()

    update_content_metadata(channel_id)

    channel = ChannelMetadata.objects.get(id=channel_id)
    channel.last_updated = local_now()
    try:
        # Accessing channel.root raises ContentNode.DoesNotExist when the
        # root node is missing. Use a plain attribute access instead of
        # `assert` so the check still runs under `python -O`, which strips
        # assert statements.
        channel.root
    except ContentNode.DoesNotExist:
        node_id = channel.root_id
        ContentNode.objects.create(
            id=node_id, title=channel.name, content_id=node_id, channel_id=channel_id
        )
    channel.save()
def import_channel_from_local_db(channel_id, cancel_check=None):
    """
    Import a channel's data from a local content database into the main DB.

    If the channel ends up without a root node, a placeholder root is created
    so the channel stays navigable.

    :param channel_id: id of the channel to import.
    :param cancel_check: optional callable polled by the import manager to
        support cancellation.
    :return: the value reported by import_channel_data() (whether the import ran).
    """
    import_manager = initialize_import_manager(channel_id, cancel_check=cancel_check)
    import_ran = import_manager.import_channel_data()
    import_manager.end()

    channel = ChannelMetadata.objects.get(id=channel_id)
    channel.last_updated = local_now()
    try:
        # Accessing channel.root raises ContentNode.DoesNotExist when the
        # root node is missing. Use a plain attribute access instead of
        # `assert` so the check still runs under `python -O`, which strips
        # assert statements.
        channel.root
    except ContentNode.DoesNotExist:
        node_id = channel.root_id
        ContentNode.objects.create(
            id=node_id, title=channel.name, content_id=node_id, channel_id=channel_id
        )
    channel.save()

    logger.info("Channel {} successfully imported into the database".format(channel_id))
    return import_ran
def _ping(started, server, checkrate):
    """
    Attempt a single analytics pingback; on a network failure, reschedule
    this job to retry in `checkrate` minutes.

    :param started: datetime of server start, forwarded to ping_once.
    :param server: base URL of the pingback server.
    :param checkrate: retry delay in minutes after a failed attempt.
    """
    try:
        ping_once(started, server=server)
        # Close the DB connection explicitly: this runs on a worker thread
        # whose connection would otherwise linger.
        connection.close()
        return
    except ConnectionError:
        # logger.warning: `warn` is a deprecated alias since Python 3.3.
        logger.warning(
            "Ping failed (could not connect). Trying again in {} minutes.".format(
                checkrate
            )
        )
    except Timeout:
        logger.warning(
            "Ping failed (connection timed out). Trying again in {} minutes.".format(
                checkrate
            )
        )
    except RequestException as e:
        logger.warning(
            "Ping failed ({})! Trying again in {} minutes.".format(e, checkrate)
        )
    connection.close()
    # Push this job's next execution time out by the retry delay.
    job = get_current_job()
    if job and job in scheduler:
        scheduler.change_execution_time(
            job, local_now() + datetime.timedelta(seconds=checkrate * 60)
        )
def test_parse_attemptslog_update_attempt_with_three_wrong_attempts_no_started(
    self, save_notifications, create_notification
):
    """
    Three consecutive wrong attempts on an exercise (with no prior Started
    notification) trigger both a Help notification (reason: Multiple) and a
    Resource/Started notification.
    """
    log = ContentSessionLogFactory(
        user=self.user1, content_id=uuid.uuid4().hex, channel_id=uuid.uuid4().hex
    )
    now = local_now()
    masterylog = MasteryLog.objects.create(
        summarylog=self.summarylog1,
        user=self.user1,
        start_timestamp=now,
        mastery_level=1,
        complete=True,
    )
    interactions = [{"type": "answer", "correct": 0}]
    # First wrong attempt.
    AttemptLog.objects.create(
        masterylog=masterylog,
        sessionlog=log,
        user=self.user1,
        start_timestamp=now,
        end_timestamp=now,
        time_spent=1.0,
        complete=True,
        correct=0,
        hinted=False,
        error=False,
        interaction_history=interactions,
    )
    # Second wrong attempt.
    AttemptLog.objects.create(
        masterylog=masterylog,
        sessionlog=log,
        user=self.user1,
        start_timestamp=now,
        end_timestamp=now,
        time_spent=1.0,
        complete=True,
        correct=0,
        hinted=False,
        error=False,
        interaction_history=interactions,
    )
    # more than 3 attempts will trigger the help notification
    interactions.append({"type": "answer", "correct": 0})
    attemptlog3 = AttemptLog.objects.create(
        masterylog=masterylog,
        sessionlog=log,
        user=self.user1,
        start_timestamp=now,
        end_timestamp=now,
        time_spent=1.0,
        complete=True,
        correct=0,
        hinted=False,
        error=False,
        interaction_history=interactions,
    )
    parse_attemptslog(attemptlog3)
    assert save_notifications.called
    # Help notification for repeated failures…
    create_notification.assert_any_call(
        NotificationObjectType.Resource,
        NotificationEventType.Help,
        attemptlog3.user_id,
        self.classroom.id,
        assignment_collections=[self.classroom.id],
        lesson_id=self.lesson_id,
        contentnode_id=self.node_1.id,
        reason=HelpReason.Multiple,
        timestamp=attemptlog3.start_timestamp,
    )
    # …plus the missing Started notification for the resource.
    create_notification.assert_any_call(
        NotificationObjectType.Resource,
        NotificationEventType.Started,
        attemptlog3.user_id,
        self.classroom.id,
        assignment_collections=[self.classroom.id],
        lesson_id=self.lesson_id,
        contentnode_id=self.node_1.id,
        timestamp=attemptlog3.start_timestamp,
    )
def test_scheduled_repeating_function_enqueues_job(self, scheduler):
    """After check_schedule, a due repeating job appears on the work queue."""
    job_id = scheduler.schedule(local_now(), id, interval=1000, repeat=None)
    scheduler.check_schedule()
    fetched = scheduler.queue.fetch_job(job_id)
    assert fetched.job_id == job_id
def create(self, validated_data):
    """Create the exam log, then queue a check for any needed notification."""
    instance = super(ExamLogSerializer, self).create(validated_data)
    # Defer the "should a notification be created?" check to the save queue.
    wrap_to_save_queue(create_examlog, instance, local_now())
    return instance
def test_enqueue_at_a_function(self, scheduler):
    """An enqueued job is recorded in the storage backend under its id."""
    new_id = scheduler.enqueue_at(local_now(), id)
    stored = scheduler.get_job(new_id)
    assert stored.job_id == new_id
def test_scheduled_repeating_function_updates_old_job(self, scheduler):
    """Rescheduling a repeating job reuses its job id instead of creating a new one."""
    original_id = scheduler.schedule(local_now(), id, interval=1000, repeat=None)
    scheduler.check_schedule()
    assert scheduler.get_jobs()[0].job_id == original_id
def test_enqueue_at_preserves_extra_metadata(self, scheduler):
    """extra_metadata passed at enqueue time round-trips through storage."""
    extra = {"saved": True}
    stored_id = scheduler.enqueue_at(local_now(), id, extra_metadata=extra)
    assert scheduler.get_job(stored_id).extra_metadata == extra