Example #1
def query_logs(users, items, logtype, logdict):
    """
    Get a specified subset of logs for a particular set of users for either exercises or videos.
    users: list of users to query against.
    items: list of either exercises or videos to query.
    logtype: exercise, video, activity, or summaryactivity.
    logdict: user keyed dictionary of logs (presumed to be empty by this code)
    """

    if logtype == "exercise":
        all_logs = ExerciseLog.objects.filter(user__in=users, exercise_id__in=items).values(
                        'user', 'complete', 'exercise_id', 'attempts', 'points', 'struggling', 'completion_timestamp', 'streak_progress').order_by('completion_timestamp')
    elif logtype == "video":
        all_logs = VideoLog.objects.filter(user__in=users, video_id__in=items).values(
            'user', 'complete', 'video_id', 'total_seconds_watched', 'completion_timestamp', 'points').order_by('completion_timestamp')
    elif logtype == "activity" and UserLog.is_enabled():
        all_logs = UserLog.objects.filter(user__in=users).values(
            'user', 'last_active_datetime', 'total_seconds').order_by('last_active_datetime')
    elif logtype == "summaryactivity" and UserLog.is_enabled():
        all_logs = UserLogSummary.objects.filter(user__in=users).values(
            'user', 'device', 'total_seconds').order_by('end_datetime')
    else:
        assert False, "Unknown log type: '%s'" % logtype  # indicates a programming error

    for log in all_logs:
        logdict[log['user']].append(log)
    return logdict
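
query_logs() only appends to logdict[log['user']], so the caller has to hand in a dictionary whose user keys already behave like lists. A minimal usage sketch, assuming student_ids and exercise_ids are lists you already have (placeholder names, not taken from the code above):

from collections import defaultdict

ex_logs = defaultdict(list)  # every user key must map to a list; defaultdict creates one on demand
ex_logs = query_logs(student_ids, exercise_ids, "exercise", ex_logs)
# ex_logs now maps each user to that user's exercise logs, ordered by completion_timestamp

compute_data() further below pre-seeds a plain dict with one empty list per user instead, so that users with no logs still appear in the result.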
Example #2
def query_logs(users, items, logtype, logdict):
    """
    Get a specified subset of logs for a particular set of users for either exercises or videos.
    users: list of users to query against.
    items: list of either exercises or videos to query.
    logtype: exercise, video, activity, or summaryactivity.
    logdict: user keyed dictionary of logs (presumed to be empty by this code)
    """

    if logtype == "exercise":
        all_logs = ExerciseLog.objects.filter(
            user__in=users, exercise_id__in=items).values(
                'user', 'complete', 'exercise_id', 'attempts', 'points',
                'struggling', 'completion_timestamp',
                'streak_progress').order_by('completion_timestamp')
    elif logtype == "video":
        all_logs = VideoLog.objects.filter(
            user__in=users, youtube_id__in=items).values(
                'user', 'complete', 'youtube_id', 'total_seconds_watched',
                'completion_timestamp',
                'points').order_by('completion_timestamp')
    elif logtype == "activity" and UserLog.is_enabled():
        all_logs = UserLog.objects.filter(user__in=users).values(
            'user', 'last_active_datetime',
            'total_seconds').order_by('last_active_datetime')
    elif logtype == "summaryactivity" and UserLog.is_enabled():
        all_logs = UserLogSummary.objects.filter(user__in=users).values(
            'user', 'device', 'total_seconds').order_by('end_datetime')
    else:
        raise Exception("Unknown log type: '%s'" % logtype)
    for log in all_logs:
        logdict[log['user']].append(log)
    return logdict
Example #3
    def test_query_login_teacher(self):
        """Check the # of queries when logging in as a teacher."""
        teacher = FacilityUser(is_teacher=True, username="******", facility=self.facility)
        passwd = self._gen_valid_password()
        teacher.set_password(passwd)
        teacher.save()

        with self.assertNumQueries(39 + 3*UserLog.is_enabled()):
            self.browser_login_teacher("t1", passwd, self.facility)
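
The expected query count in these tests leans on Python treating booleans as integers: the 3 * UserLog.is_enabled() term adds the per-login user-logging queries only when that feature is turned on.

assert 39 + 3 * True == 42    # user logging enabled: three extra queries per login
assert 39 + 3 * False == 39   # user logging disabled: no extra queries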
Example #4
    def test_query_login_student(self):
        """Check the # of queries when logging in as a student."""
        student = FacilityUser(is_teacher=False, username="******", facility=self.facility)
        passwd = self._gen_valid_password()
        student.set_password(passwd)
        student.save()

        with self.assertNumQueries(39 + 3*UserLog.is_enabled()):
            self.browser_login_student("s1", passwd, self.facility)
Example #5
    def test_query_login_teacher(self):
        """Check the # of queries when logging in as a teacher."""
        teacher = FacilityUser(is_teacher=True,
                               username="******",
                               facility=self.facility)
        passwd = self._gen_valid_password()
        teacher.set_password(passwd)
        teacher.save()

        with self.assertNumQueries(39 + 3 * UserLog.is_enabled()):
            self.browser_login_teacher("t1", passwd, self.facility)
Example #6
    def test_query_login_student(self):
        """Check the # of queries when logging in as a student."""
        student = FacilityUser(is_teacher=False,
                               username="******",
                               facility=self.facility)
        passwd = self._gen_valid_password()
        student.set_password(passwd)
        student.save()

        with self.assertNumQueries(39 + 3 * UserLog.is_enabled()):
            self.browser_login_student("s1", passwd, self.facility)
Example #7
 def test_query_login_admin(self):
     with self.assertNumQueries(38 + 0*UserLog.is_enabled()):
         self.browser_login_admin()
Example #8
def generate_fake_exercise_logs(facility_user=None, topics=topics, start_date=datetime.datetime.now() - datetime.timedelta(days=30 * 6)):
    """Add exercise logs for the given topics, for each of the given users.
    If no users are given, they are created.
    If no topics exist, they are taken from the list at the top of this file.

    By default, users start learning randomly between 6 months ago and now.
    """

    own_device = Device.get_own_device()
    date_diff = datetime.datetime.now() - start_date
    exercise_logs = []
    user_logs = []

    # It's not a user: probably a list.
    # Recursive case
    if not hasattr(facility_user, "username"):
        # It's NONE :-/ generate the users first!
        if not facility_user:
            (facility_user, _, _) = generate_fake_facility_users()

        for topic in topics:
            for user in facility_user:
                (elogs, ulogs) = generate_fake_exercise_logs(facility_user=user, topics=[topic], start_date=start_date)
                exercise_logs.append(elogs)
                user_logs.append(ulogs)

    # Actually generate!
    else:
        # Get (or create) user type
        try:
            user_settings = json.loads(facility_user.notes)
        except:
            user_settings = sample_user_settings()
            facility_user.notes = json.dumps(user_settings)
            facility_user.save()
        date_diff_started = datetime.timedelta(seconds=datediff(date_diff, units="seconds") * user_settings["time_in_program"])  # when this user started in the program, relative to NOW

        for topic in topics:
            # Get all exercises related to the topic
            exercises = get_topic_exercises(topic_id=topic)

            # Problem:
            #   Not realistic for students to have lots of unfinished exercises.
            #   If they start them, they tend to get stuck, right?
            #
            # So, need to make it more probable that they will finish an exercise,
            #   and less probable that they start one.
            #
            # What we need is P(streak|started), not P(streak)

            # Probability of doing any particular exercise
            p_exercise = probability_of(qty="exercise", user_settings=user_settings)
            logging.debug("# exercises: %d; p(exercise)=%4.3f, user settings: %s\n" % (len(exercises), p_exercise, json.dumps(user_settings)))

            # of exercises is related to
            for j, exercise in enumerate(exercises):
                if random.random() > p_exercise:
                    continue

                # Probability of completing this exercise, and .. proportion of attempts
                p_completed = probability_of(qty="completed", user_settings=user_settings)
                p_attempts = probability_of(qty="attempts", user_settings=user_settings)

                attempts = int(random.random() * p_attempts * 30 + 10)  # always enough to have completed
                completed = (random.random() < p_completed)
                if completed:
                    streak_progress = 100
                else:
                    streak_progress = max(0, min(90, random.gauss(100 * user_settings["speed_of_learning"], 20)))
                    streak_progress = int(floor(streak_progress / 10.)) * 10
                points = streak_progress / 10 * 12 if completed else 0  # only get points when you master.

                # Choose a rate of exercises, based on their effort level and speed of learning.
                #   Compute the latest possible start time.
                #   Then sample a start time between their start time
                #   and the latest possible start_time
                rate_of_exercises = 0.66 * user_settings["effort_level"] + 0.33 * user_settings["speed_of_learning"]  # exercises per day
                time_for_attempts = min(datetime.timedelta(days=rate_of_exercises * attempts), date_diff_started)  # protect with min
                time_delta_completed = datetime.timedelta(seconds=random.randint(int(datediff(time_for_attempts, units="seconds")), int(datediff(date_diff_started, units="seconds"))))
                date_completed = datetime.datetime.now() - time_delta_completed

                # Always create new
                logging.info("Creating exercise log: %-12s: %-25s (%d points, %d attempts, %d%% streak on %s)" % (
                    facility_user.first_name,
                    exercise["name"],
                    points,
                    attempts,
                    streak_progress,
                    date_completed,
                ))
                try:
                    elog = ExerciseLog.objects.get(user=facility_user, exercise_id=exercise["name"])
                except ExerciseLog.DoesNotExist:
                    elog = ExerciseLog(
                        user=facility_user,
                        exercise_id=exercise["name"],
                        attempts=int(attempts),
                        streak_progress=streak_progress,
                        points=int(points),
                        complete=completed,
                        completion_timestamp=date_completed,
                    )
                    elog.save(update_userlog=False)

                    # For now, make all attempts on an exercise into a single UserLog.
                    seconds_per_attempt = 10 * (1 + user_settings["speed_of_learning"] * random.random())
                    time_to_navigate = 15 * (0.5 + random.random())  #between 7.5s and 22.5s
                    time_to_logout = 5 * (0.5 + random.random()) # between 2.5 and 7.5s
                    if UserLog.is_enabled():
                        ulog = UserLog(
                            user=facility_user,
                            activity_type=1,
                            start_datetime = date_completed - datetime.timedelta(seconds=int(attempts * seconds_per_attempt + time_to_navigate)),
                            end_datetime = date_completed + datetime.timedelta(seconds=time_to_logout),
                            last_active_datetime = date_completed,
                        )
                        ulog.save()
                        user_logs.append(ulog)
                exercise_logs.append(elog)

    return (exercise_logs, user_logs)
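
A hedged invocation sketch; the topic ids below are illustrative placeholders, not values taken from this codebase:

import datetime

# facility_user=None takes the recursive branch: fake facility users are
# generated first, then logs are built for every (user, topic) pair.
exercise_logs, user_logs = generate_fake_exercise_logs(
    facility_user=None,
    topics=["addition_1", "subtraction_1"],  # hypothetical topic ids
    start_date=datetime.datetime.now() - datetime.timedelta(days=90),
)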
Example #9
def compute_data(data_types, who, where):
    """
    Compute the data in "data_types" for each user in "who", for the topics selected by "where"

    who: list of users
    where: topic_path
    data_types can include:
        pct_mastery
        effort
        attempts
    """

    # None indicates that the data hasn't been queried yet.
    #   We'll query it on demand, for efficiency
    topics = None
    exercises = None
    videos = None

    # Initialize an empty dictionary of data, video logs, exercise logs, for each user
    data = OrderedDict(zip([w.id for w in who], [dict() for i in range(len(who))]))  # maintain the order of the users
    vid_logs = dict(zip([w.id for w in who], [[] for i in range(len(who))]))
    ex_logs = dict(zip([w.id for w in who], [[] for i in range(len(who))]))
    if UserLog.is_enabled():
        activity_logs = dict(zip([w.id for w in who], [[] for i in range(len(who))]))

    # Set up queries (but don't run them), so we have really easy aliases.
    #   Only do them if they haven't been done yet (tell this by passing in a value to the lambda function)
    # Topics: topics.
    # Exercises: names (ids for ExerciseLog objects)
    # Videos: video_id (ids for VideoLog objects)

    # This lambda partial creates a function to return all items with a particular path from the NODE_CACHE.
    search_fun_single_path = partial(lambda t, p: t["path"].startswith(p), p=tuple(where))
    # This lambda partial creates a function to return all items with paths matching a list of paths from NODE_CACHE.
    search_fun_multi_path = partial(lambda ts, p: any([t["path"].startswith(p) for t in ts]),  p=tuple(where))
    # Functions that use the functions defined above to return topics, exercises, and videos based on paths.
    query_topics = partial(lambda t, sf: t if t is not None else [t[0]["id"] for t in filter(sf, get_node_cache('Topic').values())], sf=search_fun_single_path)
    query_exercises = partial(lambda e, sf: e if e is not None else [ex[0]["id"] for ex in filter(sf, get_node_cache('Exercise').values())], sf=search_fun_multi_path)
    query_videos = partial(lambda v, sf: v if v is not None else [vid[0]["id"] for vid in filter(sf, get_node_cache('Video').values())], sf=search_fun_multi_path)

    # No users, don't bother.
    if len(who) > 0:

        # Query out all exercises, videos, exercise logs, and video logs before looping to limit requests.
        # This means we could pull data for n-dimensional coach report displays with the same number of requests!
        # Note: User activity is polled inside the loop, to prevent possible slowdown for exercise and video reports.
        exercises = query_exercises(exercises)

        videos = query_videos(videos)

        if exercises:
            ex_logs = query_logs(data.keys(), exercises, "exercise", ex_logs)

        if videos:
            vid_logs = query_logs(data.keys(), videos, "video", vid_logs)

        for data_type in (data_types if not hasattr(data_types, "lower") else [data_types]):  # convert list from string, if necessary
            if data_type in data[data.keys()[0]]:  # if the first user has it, then all do; no need to calc again.
                continue

            #
            # These are summary stats: you only get one per user
            #
            if data_type == "pct_mastery":

                # Efficient query out, spread out to dict
                for user in data.keys():
                    data[user][data_type] = 0 if not ex_logs[user] else 100. * sum([el['complete'] for el in ex_logs[user]]) / float(len(exercises))

            elif data_type == "effort":
                if "ex:attempts" in data[data.keys()[0]] and "vid:total_seconds_watched" in data[data.keys()[0]]:
                    # exercises and videos would be initialized already
                    for user in data.keys():
                        avg_attempts = 0 if len(exercises) == 0 else sum(data[user]["ex:attempts"].values()) / float(len(exercises))
                        avg_video_points = 0 if len(videos) == 0 else sum(data[user]["vid:total_seconds_watched"].values()) / float(len(videos))
                        data[user][data_type] = 100. * (0.5 * avg_attempts / 10. + 0.5 * avg_video_points / 750.)
                else:
                    data_types += ["ex:attempts", "vid:total_seconds_watched", "effort"]

            #
            # These are detail stats: you get many per user
            #
            # Just querying out data directly: Video
            elif data_type.startswith("vid:") and data_type[4:] in [f.name for f in VideoLog._meta.fields]:

                for user in data.keys():
                    data[user][data_type] = OrderedDict([(v['video_id'], v[data_type[4:]]) for v in vid_logs[user]])

            # Just querying out data directly: Exercise
            elif data_type.startswith("ex:") and data_type[3:] in [f.name for f in ExerciseLog._meta.fields]:

                for user in data.keys():
                    data[user][data_type] = OrderedDict([(el['exercise_id'], el[data_type[3:]]) for el in ex_logs[user]])

            # User Log Queries
            elif data_type.startswith("user:"******"", "activity", activity_logs)

                for user in data.keys():
                    data[user][data_type] = [log[data_type[5:]] for log in activity_logs[user]]

            # User Summary Queries
            elif data_type.startswith("usersum:") and data_type[8:] in [f.name for f in UserLogSummary._meta.fields] and UserLog.is_enabled():

                activity_logs = query_logs(data.keys(), "", "summaryactivity", activity_logs)

                for user in data.keys():
                    data[user][data_type] = sum([log[data_type[8:]] for log in activity_logs[user]])
            # Unknown requested quantity
            else:
                raise Exception("Unknown type: '%s' not in %s" % (data_type, str([f.name for f in ExerciseLog._meta.fields])))

    # Returning empty list instead of None allows javascript on client
    # side to read 'length' property without error.
    exercises = exercises or []

    videos = videos or []

    return {
        "data": data,
        "topics": topics,
        "exercises": exercises,
        "videos": videos,
    }
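
A usage sketch under stated assumptions: facility is assumed to be in scope, and the topic path prefix is a placeholder; where is converted to a tuple of prefixes and matched against each node's 'path' with str.startswith, so it should be a list of path strings (the docstring calls it a topic_path).

who = list(FacilityUser.objects.filter(facility=facility))  # 'facility' assumed to exist
report = compute_data(
    data_types=["pct_mastery", "ex:attempts"],
    who=who,
    where=["khan/math/arithmetic/"],  # hypothetical topic path prefix
)
# report["data"] is an OrderedDict keyed by user id, in the same order as `who`
mastery_by_user = dict((user_id, stats["pct_mastery"]) for user_id, stats in report["data"].items())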
Example #10
 def test_query_logout_teacher(self):
     """"""
     self.test_query_login_teacher()
     with self.assertNumQueries(16 + 11*UserLog.is_enabled()):
         self.browser_logout_user()
Example #11
def compute_data(data_types, who, where):
    """
    Compute the data in "data_types" for each user in "who", for the topics selected by "where"

    who: list of users
    where: topic_path
    data_types can include:
        pct_mastery
        effort
        attempts
    """

    # None indicates that the data hasn't been queried yet.
    #   We'll query it on demand, for efficiency
    topics = None
    exercises = None
    videos = None

    # Initialize an empty dictionary of data, video logs, exercise logs, for each user
    data = OrderedDict(
        zip([w.id for w in who],
            [dict()
             for i in range(len(who))]))  # maintain the order of the users
    vid_logs = dict(zip([w.id for w in who], [[] for i in range(len(who))]))
    ex_logs = dict(zip([w.id for w in who], [[] for i in range(len(who))]))
    if UserLog.is_enabled():
        activity_logs = dict(
            zip([w.id for w in who], [[] for i in range(len(who))]))

    # Set up queries (but don't run them), so we have really easy aliases.
    #   Only do them if they haven't been done yet (tell this by passing in a value to the lambda function)
    # Topics: topics.
    # Exercises: names (ids for ExerciseLog objects)
    # Videos: youtube_id (ids for VideoLog objects)

    # This lambda partial creates a function to return all items with a particular path from the NODECACHE.
    search_fun_single_path = partial(lambda t, p: t["path"].startswith(p),
                                     p=tuple(where))
    # This lambda partial creates a function to return all items with paths matching a list of paths from NODECACHE.
    search_fun_multi_path = partial(
        lambda t, p: any([tp.startswith(p) for tp in t["paths"]]),
        p=tuple(where))
    # Functions that use the functions defined above to return topics, exercises, and videos based on paths.
    query_topics = partial(
        lambda t, sf: t if t is not None else
        [t for t in filter(sf, topicdata.NODE_CACHE['Topic'].values())],
        sf=search_fun_single_path)
    query_exercises = partial(lambda e, sf: e if e is not None else [
        ex["name"]
        for ex in filter(sf, topicdata.NODE_CACHE['Exercise'].values())
    ],
                              sf=search_fun_multi_path)
    query_videos = partial(lambda v, sf: v if v is not None else [
        vid["youtube_id"]
        for vid in filter(sf, topicdata.NODE_CACHE['Video'].values())
    ],
                           sf=search_fun_multi_path)

    # No users, don't bother.
    if len(who) > 0:

        # Query out all exercises, videos, exercise logs, and video logs before looping to limit requests.
        # This means we could pull data for n-dimensional coach report displays with the same number of requests!
        # Note: User activity is polled inside the loop, to prevent possible slowdown for exercise and video reports.
        exercises = query_exercises(exercises)

        videos = query_videos(videos)

        if exercises:
            ex_logs = query_logs(data.keys(), exercises, "exercise", ex_logs)

        if videos:
            vid_logs = query_logs(data.keys(), videos, "video", vid_logs)

        for data_type in (data_types if not hasattr(data_types, "lower") else [
                data_types
        ]):  # convert list from string, if necessary
            if data_type in data[data.keys(
            )[0]]:  # if the first user has it, then all do; no need to calc again.
                continue

            #
            # These are summary stats: you only get one per user
            #
            if data_type == "pct_mastery":

                # Efficient query out, spread out to dict
                for user in data.keys():
                    data[user][
                        data_type] = 0 if not ex_logs[user] else 100. * sum(
                            [el['complete']
                             for el in ex_logs[user]]) / float(len(exercises))

            elif data_type == "effort":
                if "ex:attempts" in data[data.keys(
                )[0]] and "vid:total_seconds_watched" in data[data.keys()[0]]:
                    # exercises and videos would be initialized already
                    for user in data.keys():
                        avg_attempts = 0 if len(exercises) == 0 else sum(
                            data[user]["ex:attempts"].values()) / float(
                                len(exercises))
                        avg_video_points = 0 if len(videos) == 0 else sum(
                            data[user]["vid:total_seconds_watched"].values(
                            )) / float(len(videos))
                        data[user][data_type] = 100. * (
                            0.5 * avg_attempts / 10. +
                            0.5 * avg_video_points / 750.)
                else:
                    data_types += [
                        "ex:attempts", "vid:total_seconds_watched", "effort"
                    ]

            #
            # These are detail stats: you get many per user
            #
            # Just querying out data directly: Video
            elif data_type.startswith("vid:") and data_type[4:] in [
                    f.name for f in VideoLog._meta.fields
            ]:

                for user in data.keys():
                    data[user][data_type] = OrderedDict([
                        (v['youtube_id'], v[data_type[4:]])
                        for v in vid_logs[user]
                    ])

            # Just querying out data directly: Exercise
            elif data_type.startswith("ex:") and data_type[3:] in [
                    f.name for f in ExerciseLog._meta.fields
            ]:

                for user in data.keys():
                    data[user][data_type] = OrderedDict([
                        (el['exercise_id'], el[data_type[3:]])
                        for el in ex_logs[user]
                    ])

            # User Log Queries
            elif data_type.startswith("user:"******"", "activity",
                                           activity_logs)

                for user in data.keys():
                    data[user][data_type] = [
                        log[data_type[5:]] for log in activity_logs[user]
                    ]

            # User Summary Queries
            elif data_type.startswith("usersum:") and data_type[8:] in [
                    f.name for f in UserLogSummary._meta.fields
            ] and UserLog.is_enabled():

                activity_logs = query_logs(data.keys(), "", "summaryactivity",
                                           activity_logs)

                for user in data.keys():
                    data[user][data_type] = sum(
                        [log[data_type[8:]] for log in activity_logs[user]])
            # Unknown requested quantity
            else:
                raise Exception(
                    "Unknown type: '%s' not in %s" %
                    (data_type, str([f.name
                                     for f in ExerciseLog._meta.fields])))

    # Returning empty list instead of None allows javascript on client
    # side to read 'length' property without error.
    exercises = exercises or []

    videos = videos or []

    return {
        "data": data,
        "topics": topics,
        "exercises": exercises,
        "videos": videos,
    }
Example #12
        "key": "user:total_seconds",
        "name": _("Active Time Per Login"),
        "type": "number",
        "description": "Duration of each login session.",
        "noscatter": True,
        "timeline": True
    },
    {
        "key": "user:last_active_datetime",
        "name": _("Time Session Completed"),
        "type": "datetime",
        "description": _("Day/time the login session finished.")
    },
]

if UserLog.is_enabled():
    stats_dict.extend(user_log_stats_dict)


def get_data_form(request, *args, **kwargs):
    """Get the basic data form, by combining information from
    keyword arguments and the request.REQUEST object.
    Along the way, check permissions to make sure whatever's being requested is OK.

    Request objects get priority over keyword args.
    """
    assert not args, "all non-request args should be keyword args"

    # Pull the form parameters out of the request or
    data = dict()
    # Default to empty string, as it makes template handling cleaner later.
Example #13
 def test_query_logout_student(self):
     """"""
     self.test_query_login_student()
     with self.assertNumQueries(14 + 11 * UserLog.is_enabled()):
         self.browser_logout_user()
Example #14
 def test_query_logout_teacher(self):
     """"""
     self.test_query_login_teacher()
     with self.assertNumQueries(16 + 11 * UserLog.is_enabled()):
         self.browser_logout_user()
Example #15
 def test_query_logout_admin(self):
     """"""
     self.test_query_login_admin()
     with self.assertNumQueries(17 + 0 * UserLog.is_enabled()):
         self.browser_logout_user()
Example #16
def generate_fake_exercise_logs(facility_user=None,
                                topics=topics,
                                start_date=datetime.datetime.now() -
                                datetime.timedelta(days=30 * 6)):
    """Add exercise logs for the given topics, for each of the given users.
    If no users are given, they are created.
    If no topics exist, they are taken from the list at the top of this file.

    By default, users start learning randomly between 6 months ago and now.
    """

    own_device = Device.get_own_device()
    date_diff = datetime.datetime.now() - start_date
    exercise_logs = []
    user_logs = []

    # It's not a user: probably a list.
    # Recursive case
    if not hasattr(facility_user, "username"):
        # It's NONE :-/ generate the users first!
        if not facility_user:
            (facility_user, _, _) = generate_fake_facility_users()

        for topic in topics:
            for user in facility_user:
                (elogs,
                 ulogs) = generate_fake_exercise_logs(facility_user=user,
                                                      topics=[topic],
                                                      start_date=start_date)
                exercise_logs.append(elogs)
                user_logs.append(ulogs)

    # Actually generate!
    else:
        # Get (or create) user type
        try:
            user_settings = json.loads(facility_user.notes)
        except:
            user_settings = sample_user_settings()
            facility_user.notes = json.dumps(user_settings)
            facility_user.save()
        date_diff_started = datetime.timedelta(
            seconds=datediff(date_diff, units="seconds") *
            user_settings["time_in_program"]
        )  # when this user started in the program, relative to NOW

        for topic in topics:
            # Get all exercises related to the topic
            exercises = get_topic_exercises(topic_id=topic)

            # Problem:
            #   Not realistic for students to have lots of unfinished exercises.
            #   If they start them, they tend to get stuck, right?
            #
            # So, need to make it more probable that they will finish an exercise,
            #   and less probable that they start one.
            #
            # What we need is P(streak|started), not P(streak)

            # Probability of doing any particular exercise
            p_exercise = probability_of(qty="exercise",
                                        user_settings=user_settings)
            logging.debug(
                "# exercises: %d; p(exercise)=%4.3f, user settings: %s\n" %
                (len(exercises), p_exercise, json.dumps(user_settings)))

            # of exercises is related to
            for j, exercise in enumerate(exercises):
                if random.random() > p_exercise:
                    continue

                # Probability of completing this exercise, and .. proportion of attempts
                p_completed = probability_of(qty="completed",
                                             user_settings=user_settings)
                p_attempts = probability_of(qty="attempts",
                                            user_settings=user_settings)

                attempts = int(random.random() * p_attempts * 30 +
                               10)  # always enough to have completed
                completed = (random.random() < p_completed)
                if completed:
                    streak_progress = 100
                else:
                    streak_progress = max(
                        0,
                        min(
                            90,
                            random.gauss(
                                100 * user_settings["speed_of_learning"], 20)))
                    streak_progress = int(floor(streak_progress / 10.)) * 10
                points = streak_progress / 10 * 12 if completed else 0  # only get points when you master.

                # Choose a rate of exercises, based on their effort level and speed of learning.
                #   Compute the latest possible start time.
                #   Then sample a start time between their start time
                #   and the latest possible start_time
                rate_of_exercises = 0.66 * user_settings[
                    "effort_level"] + 0.33 * user_settings[
                        "speed_of_learning"]  # exercises per day
                time_for_attempts = min(
                    datetime.timedelta(days=rate_of_exercises * attempts),
                    date_diff_started)  # protect with min
                time_delta_completed = datetime.timedelta(
                    seconds=random.randint(
                        int(datediff(time_for_attempts, units="seconds")),
                        int(datediff(date_diff_started, units="seconds"))))
                date_completed = datetime.datetime.now() - time_delta_completed

                # Always create new
                logging.info(
                    "Creating exercise log: %-12s: %-25s (%d points, %d attempts, %d%% streak on %s)"
                    % (
                        facility_user.first_name,
                        exercise["name"],
                        points,
                        attempts,
                        streak_progress,
                        date_completed,
                    ))
                try:
                    elog = ExerciseLog.objects.get(
                        user=facility_user, exercise_id=exercise["name"])
                except ExerciseLog.DoesNotExist:
                    elog = ExerciseLog(
                        user=facility_user,
                        exercise_id=exercise["name"],
                        attempts=int(attempts),
                        streak_progress=streak_progress,
                        points=int(points),
                        complete=completed,
                        completion_timestamp=date_completed,
                    )
                    elog.save(update_userlog=False)

                    # For now, make all attempts on an exercise into a single UserLog.
                    seconds_per_attempt = 10 * (
                        1 +
                        user_settings["speed_of_learning"] * random.random())
                    time_to_navigate = 15 * (0.5 + random.random()
                                             )  #between 7.5s and 22.5s
                    time_to_logout = 5 * (0.5 + random.random()
                                          )  # between 2.5 and 7.5s
                    if UserLog.is_enabled():
                        ulog = UserLog(
                            user=facility_user,
                            activity_type=1,
                            start_datetime=date_completed - datetime.timedelta(
                                seconds=int(attempts * seconds_per_attempt +
                                            time_to_navigate)),
                            end_datetime=date_completed +
                            datetime.timedelta(seconds=time_to_logout),
                            last_active_datetime=date_completed,
                        )
                        ulog.save()
                        user_logs.append(ulog)
                exercise_logs.append(elog)

    return (exercise_logs, user_logs)
Example #17
 def test_query_logout_admin(self):
     """"""
     self.test_query_login_admin()
     with self.assertNumQueries(17 + 0*UserLog.is_enabled()):
         self.browser_logout_user()
Example #18
    {"key": "ex:attempts",        "name": _("Average attempts"),   "type": "number", "description": _("Number of times submitting an answer to an exercise.")},
    {"key": "ex:streak_progress", "name": _("Average streak"),     "type": "number", "description": _("Maximum number of consecutive correct answers on an exercise.")},
    {"key": "ex:points",          "name": _("Exercise points"),    "type": "number", "description": _("[Pointless at the moment; tracks mastery linearly]")},
    { "key": "ex:completion_timestamp", "name": _("Time exercise completed"),"type": "datetime", "description": _("Day/time the exercise was completed.") },
    {"key": "vid:points",          "name": _("Video points"),      "type": "number", "description": _("Points earned while watching a video (750 max / video).")},
    { "key": "vid:total_seconds_watched","name": _("Video time"),   "type": "number", "description": _("Total seconds spent watching a video.") },
    { "key": "vid:completion_timestamp", "name": _("Time video completed"),"type": "datetime", "description": _("Day/time the video was completed.") },
]

user_log_stats_dict = [
    { "key": "usersum:total_seconds", "name": _("Time Active (s)"), "type": "number", "description": _("Total time spent actively logged in.")},
    { "key": "user:total_seconds", "name": _("Active Time Per Login"), "type": "number", "description": _("Duration of each login session."), "noscatter": True, "timeline": True},
    { "key": "user:last_active_datetime", "name": _("Time Session Completed"),"type": "datetime", "description": _("Day/time the login session finished.")},
]

if UserLog.is_enabled():
    stats_dict.extend(user_log_stats_dict)

def get_data_form(request, *args, **kwargs):
    """Get the basic data form, by combining information from
    keyword arguments and the request.REQUEST object.
    Along the way, check permissions to make sure whatever's being requested is OK.

    Request objects get priority over keyword args.
    """
    assert not args, "all non-request args should be keyword args"

    # Pull the form parameters out of the request or
    data = dict()
    # Default to empty string, as it makes template handling cleaner later.
    for field in ["facility", "group", "user", "xaxis", "yaxis"]:
Example #19
 def test_query_logout_student(self):
     """"""
     self.test_query_login_student()
     with self.assertNumQueries(14 + 11*UserLog.is_enabled()):
         self.browser_logout_user()
Example #20
 def test_query_login_admin(self):
     with self.assertNumQueries(38 + 0 * UserLog.is_enabled()):
         self.browser_login_admin()