def small_concepts(request, skill_identifier):
    if skill_identifier not in SKILL_TABLES:
        return JsonResponse({'msg': 'no_table'})
    skill = get_object_or_404(Skill, identifier=skill_identifier)
    skills = Skill.objects.filter(item__child_relations__parent=skill.item_id, active=True)
    items = [s.item_id for s in skills]
    environment = get_environment()
    model = get_predictive_model()
    predictions = model.predict_more_items(environment, request.user.pk, items, datetime.now())
    answer_counts = environment.read_more_items('answer_count', user=request.user.pk, items=items, default=0)
    data = {}
    for s, p, i in zip(skills, predictions, items):
        data[s.identifier] = {
            'name': s.name,
            'prediction': p,
            'answer_count': answer_counts[i],
        }
    return JsonResponse({
        'structure': SKILL_TABLES[skill_identifier],
        'data': data,
    })

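# A sketch of the response body assembled by small_concepts above; only the shape
# follows the code ('structure' is whatever SKILL_TABLES stores for the skill,
# 'data' is keyed by the child skills' identifiers). The identifiers and numbers
# below are made up for illustration.
EXAMPLE_SMALL_CONCEPTS_RESPONSE = {
    'structure': [['heart', 'aorta']],  # placeholder for SKILL_TABLES[skill_identifier]
    'data': {
        'heart': {'name': 'Heart', 'prediction': 0.73, 'answer_count': 12},
        'aorta': {'name': 'Aorta', 'prediction': 0.41, 'answer_count': 3},
    },
}
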
def home(request, hack=None):
    JS_FILES = (
        "dist/js/bower-libs.min.js",
        "dist/js/unminifiable-libs.js",
        "dist/js/anatomy.min.js",
    )
    CSS_FILES = (
        "dist/css/bower-libs.css",
        "dist/css/app.css",
    )
    if not hasattr(request.user, "userprofile") or request.user.userprofile is None:
        environment = get_environment()
        user = json.dumps({
            'user': {},
            'number_of_answers': environment.number_of_answers(user=request.user.id) if request.user.id is not None else 0,
            'number_of_correct_answers': environment.number_of_correct_answers(user=request.user.id) if request.user.id is not None else 0,
        })
        email = ''
    else:
        if hack is None:
            return redirect('/overview/')
        user = json.dumps(request.user.userprofile.to_json(stats=True))
        email = request.user.email
        if not request.user.userprofile.public:
            request.user.userprofile.public = True
            request.user.userprofile.save()
    hour_ago = datetime.now() - timedelta(hours=1)
    stats = {
        'number_of_answers': FlashcardAnswer.objects.count(),
        'answers_per_second': FlashcardAnswer.objects.filter(time__gt=hour_ago).count() / 3600.0,
        'number_of_flashcards': Flashcard.objects.filter(active=True, lang=get_language()).count(),
    }
    if hack == 'home':
        hack = None
    c = {
        # Czech: "Anatom.cz - practising human anatomy in pictures"
        'title': _('Anatom.cz') + ' - ' + _('procvičování anatomie člověka v obrázcích'),
        'headline': get_headline_from_url(hack),
        'is_production': settings.ON_PRODUCTION,
        'css_files': CSS_FILES,
        'js_files': JS_FILES,
        'screenshot_files': get_screenshot_files(request, hack),
        'user_json': user,
        'email': email,
        'LANGUAGE_CODE': get_language(),
        'LANGUAGES': settings.LANGUAGES,
        'LANGUAGE_DOMAINS': settings.LANGUAGE_DOMAINS,
        'is_homepage': hack is None,
        'hack': hack or '',
        'config_json': json.dumps(get_global_config()),
        'DOMAIN': request.build_absolute_uri('/')[:-1],
        'stats_json': json.dumps(stats),
        'canonical_url': 'https://' + request.META['HTTP_HOST'] + request.get_full_path().split('?')[0].replace('//', '/'),
        'base': '//' + request.META['HTTP_HOST'],
        'canonical_path': request.get_full_path().split('?')[0][1:].replace('//', '/'),
    }
    return render_to_response('home.html', c)

def enrich_mean_time(request, json_list, nested):
    from proso_models import models
    items = [question["payload"]["item_id"] for question in json_list]
    environment = models.get_environment()
    times = environment.read_more_items('time_intensity', items=items, default=math.log(DEFAULT_MEAN_TIME))
    for question in json_list:
        question["payload"]["mean_time"] = round(math.exp(times[question["payload"]["item_id"]]))

def options(request, json_list, nested):
    environment = get_environment()
    user_id = get_user_id(request)
    time = get_time(request)
    if is_time_overridden(request):
        environment.shift_time(time)
    item_selector = get_item_selector()
    option_selector = get_option_selector(item_selector)
    option_sets = get_option_set().get_option_for_flashcards([
        (question['payload'], question['question_type'])
        for question in json_list
        if question['payload']['object_type'] == 'fc_flashcard'
    ])
    metas = [question.get('meta', {}) for question in json_list]
    test_position = _test_index(metas)
    selected_items = [
        question['payload']['item_id']
        for question in json_list
        if question['payload']['object_type'] == 'fc_flashcard'
    ]
    allow_zero_option = {}
    for question in json_list:
        if question['payload']['object_type'] != 'fc_flashcard':
            continue
        if len(option_sets[question['payload']['item_id']]) == 0 and 'term_secondary' not in question['payload']:
            # If we do not have enough options, we have to force direction
            question['question_type'] = FlashcardAnswer.FROM_TERM
        disable_open_questions = False
        if question['payload']['disable_open_questions']:
            disable_open_questions = True
        elif question['payload']['restrict_open_questions']:
            disable_open_questions = question['question_type'] in {
                FlashcardAnswer.FROM_DESCRIPTION,
                FlashcardAnswer.FROM_TERM_TO_TERM_SECONDARY,
            }
        allow_zero_option[question['payload']['item_id']] = question['question_type'] in {
            FlashcardAnswer.FROM_TERM,
            FlashcardAnswer.FROM_TERM_SECONDARY_TO_TERM,
        } and not disable_open_questions
    all_options = {
        i: options
        for i, options in zip(selected_items, option_selector.select_options_more_items(
            environment, user_id, selected_items, time, option_sets,
            allow_zero_options=allow_zero_option
        ))
    }
    options_json_list = []
    # HACK: Here, we have to take into account reference questions with zero
    # options. In case of zero options we have to force a question type if the
    # restriction for zero options is enabled.
    config_zero_options_restriction = get_config(
        'proso_models', 'options_count.parameters.allow_zero_options_restriction', default=False)
    for i, question in enumerate(json_list):
        if question['payload']['object_type'] != 'fc_flashcard':
            continue
        if test_position is not None and test_position == i:
            if 'term_secondary' not in question['payload'] and config_zero_options_restriction:
                question['question_type'] = FlashcardAnswer.FROM_TERM
            question['payload']['options'] = []
            continue
        options = all_options[question['payload']['item_id']]
        question['payload']['options'] = [Item.objects.item_id_to_json(o) for o in options]
        options_json_list += question['payload']['options']
    item2object(request, options_json_list, nested=True)
    for question in json_list:
        if question['payload']['object_type'] != 'fc_flashcard':
            continue
        sort_key = 'term_secondary' if question['question_type'] == FlashcardAnswer.FROM_TERM_TO_TERM_SECONDARY else 'term'
        question['payload']['options'] = sorted(question['payload']['options'], key=lambda o: o[sort_key]['name'])

def recalculate_concepts(self, concepts, lang=None):
    """
    Recalculate given concepts for given users.

    Args:
        concepts (dict): user id (int) -> set of concepts to recalculate
        lang (Optional[str]): language used to get items in all concepts (cached).
            Defaults to None; in that case items are fetched only for the used concepts.
    """
    if len(concepts) == 0:
        return
    if lang is None:
        items = Concept.objects.get_concept_item_mapping(
            concepts=Concept.objects.filter(pk__in=set(flatten(concepts.values()))))
    else:
        items = Concept.objects.get_concept_item_mapping(lang=lang)

    environment = get_environment()
    mastery_threshold = get_mastery_trashold()
    for user, concepts in concepts.items():
        # per-item counts and predictions for everything under this user's concepts
        all_items = list(set(flatten([items[c] for c in concepts])))
        answer_counts = dict(list(zip(all_items, environment.number_of_answers_more_items(all_items, user))))
        correct_answer_counts = dict(list(zip(all_items, environment.number_of_correct_answers_more_items(all_items, user))))
        predictions = dict(list(zip(all_items, get_predictive_model().predict_more_items(environment, user, all_items, time=None))))
        new_user_stats = []
        stats_to_delete_condition = Q()
        for concept in concepts:
            answer_aggregates = Answer.objects.filter(user=user, item__in=items[concept]).aggregate(
                time_spent=Sum("response_time"),
                sessions=Count("session", True),
                time_first=Min("time"),
                time_last=Max("time"),
            )
            stats = {
                "answer_count": sum(answer_counts[i] for i in items[concept]),
                "correct_answer_count": sum(correct_answer_counts[i] for i in items[concept]),
                "item_count": len(items[concept]),
                "practiced_items_count": sum([answer_counts[i] > 0 for i in items[concept]]),
                "mastered_items_count": sum([predictions[i] >= mastery_threshold for i in items[concept]]),
                "prediction": sum([predictions[i] for i in items[concept]]) / len(items[concept]),
                "time_spent": answer_aggregates["time_spent"] / 1000,
                "session_count": answer_aggregates["sessions"],
                "time_first": answer_aggregates["time_first"].timestamp(),
                "time_last": answer_aggregates["time_last"].timestamp(),
            }
            stats_to_delete_condition |= Q(user=user, concept=concept)
            for stat_name, value in stats.items():
                new_user_stats.append(UserStat(user_id=user, concept_id=concept, stat=stat_name, value=value))
        # replace the cached stats for this user's concepts
        self.filter(stats_to_delete_condition).delete()
        self.bulk_create(new_user_stats)

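# A minimal sketch of the argument shape recalculate_concepts expects, per the
# docstring above; the user ids and concept primary keys are hypothetical. If the
# method lives on UserStat's default manager (as self.filter / self.bulk_create
# suggest), a call might look like:
#     UserStat.objects.recalculate_concepts(EXAMPLE_CONCEPTS_ARGUMENT, lang='en')
EXAMPLE_CONCEPTS_ARGUMENT = {
    42: {1, 3},  # user 42: refresh cached stats for concepts 1 and 3
    7: {3},      # user 7: refresh cached stats for concept 3
}
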
def options(request, json_list, nested):
    environment = get_environment()
    user_id = get_user_id(request)
    time = get_time(request)
    if is_time_overridden(request):
        environment.shift_time(time)
    item_selector = get_item_selector()
    option_selector = get_option_selector(item_selector)
    option_sets = get_option_set().get_option_for_flashcards([
        question['payload']
        for question in json_list
        if question['payload']['object_type'] == 'fc_flashcard'
    ])
    metas = [question.get('meta', {}) for question in json_list]
    test_position = _test_index(metas)
    selected_items = [question['payload']['item_id'] for question in json_list]
    allow_zero_option = {}
    for question in json_list:
        if question['payload']['object_type'] != 'fc_flashcard':
            continue
        if len(option_sets[question['payload']['item_id']]) == 0:
            # If we do not have enough options, we have to force direction
            question['question_type'] = FlashcardAnswer.FROM_TERM
        allow_zero_option[question['payload']['item_id']] = question['question_type'] == FlashcardAnswer.FROM_TERM
    is_flashcard_question = [question['payload']['object_type'] == 'fc_flashcard' for question in json_list]
    if not all(is_flashcard_question):
        # TODO: We should support mixed questions in the future
        raise Exception('All questions must be for flashcards!')
    all_options = option_selector.select_options_more_items(
        environment, user_id, selected_items, time, option_sets,
        allow_zero_options=allow_zero_option)
    options_json_list = []
    for i, (question, options) in enumerate(zip(json_list, all_options)):
        if test_position is not None and test_position == i:
            question['question_type'] = FlashcardAnswer.FROM_TERM
            question['payload']['options'] = []
            continue
        question['payload']['options'] = [Item.objects.item_id_to_json(o) for o in options]
        options_json_list += question['payload']['options']
    item2object(request, options_json_list, nested=False)

def home(request, hack=None):
    JS_FILES = (
        "dist/js/bower-libs.min.js",
        "dist/js/proso-apps-all.js",
        "dist/js/geography.min.js",
        "dist/js/geography.html.js",
    )
    CSS_FILES = (
        "dist/css/bower-libs.css",
        "dist/css/app.css",
        "dist/css/map.css",
    )
    if not hasattr(request.user, "userprofile") or request.user.userprofile is None:
        environment = get_environment()
        user = json.dumps({
            'user': {},
            'number_of_answers': environment.number_of_answers(user=request.user.id) if request.user.id is not None else 0,
            'number_of_correct_answers': environment.number_of_correct_answers(user=request.user.id) if request.user.id is not None else 0,
        })
        email = ''
    else:
        if get_config('proso_user', 'google.openid.migration', default=True) and not is_user_id_overridden(request):
            migrated_user = migrate_google_openid_user(request.user)
            if migrated_user is not None:
                auth.logout(request)
                migrated_user.backend = 'social_auth.backends.google.GoogleOAuth2Backend'
                auth.login(request, migrated_user)
        user = json.dumps(request.user.userprofile.to_json(stats=True))
        email = request.user.email
    c = {
        # Czech: "Blind Maps - an intelligent application for practising geography"
        'title': _(u'Slepé mapy') + ' - ' + _(u'inteligentní aplikace na procvičování zeměpisu'),
        'map': get_map_from_url(hack),
        'is_production': settings.ON_PRODUCTION,
        'css_files': CSS_FILES,
        'js_files': JS_FILES,
        'continents': Category.objects.filter(lang=get_language(), type='continent').order_by('name'),
        'states': Category.objects.filter(lang=get_language(), type='state').order_by('name'),
        'user_json': user,
        'email': email,
        'LANGUAGE_CODE': get_language(),
        'LANGUAGES': settings.LANGUAGES,
        'is_homepage': hack is None,
        'config_json': json.dumps(get_global_config()),
    }
    return render_to_response('home.html', c)

def home(request, hack=None):
    if not hasattr(request.user, "userprofile") or request.user.userprofile is None:
        environment = get_environment()
        user = json.dumps({
            'user': {},
            'number_of_answers': environment.number_of_answers(user=request.user.id) if request.user.id is not None else 0,
            'number_of_correct_answers': environment.number_of_correct_answers(user=request.user.id) if request.user.id is not None else 0,
        })
        email = ''
    else:
        if hack is None:
            return redirect('/overview/')
        if get_config('proso_user', 'google.openid.migration', default=True) and not is_user_id_overridden(request):
            migrated_user = migrate_google_openid_user(request.user)
            if migrated_user is not None:
                auth.logout(request)
                migrated_user.backend = 'social_auth.backends.google.GoogleOAuth2Backend'
                auth.login(request, migrated_user)
        user = json.dumps(request.user.userprofile.to_json(stats=True))
        email = request.user.email
    c = {
        # Czech: "Blind Maps - an intelligent application for practising geography"
        'title': _(u'Slepé mapy') + ' - ' + _(u'inteligentní aplikace na procvičování zeměpisu'),
        'map': get_map_from_url(hack),
        'is_production': settings.ON_PRODUCTION,
        'css_files': CSS_FILES,
        'map_files': get_map_files(),
        'js_files': JS_FILES,
        'continents': Category.objects.filter(lang=get_language(), type='continent').order_by('name'),
        'states': Category.objects.filter(lang=get_language(), type='state').order_by('name'),
        'regions': Category.objects.filter(lang=get_language(), type='region').order_by('name'),
        'user_json': user,
        'email': email,
        'LANGUAGE_CODE': get_language(),
        'LANGUAGES': settings.LANGUAGES,
        'LANGUAGE_DOMAINS': settings.LANGUAGE_DOMAINS if hasattr(settings, 'LANGUAGE_DOMAINS') else {},
        'is_homepage': hack is None,
        'hack': hack or '',
        'config_json': json.dumps(get_global_config()),
        'DOMAIN': request.build_absolute_uri('/')[:-1],
        'screenshot_files': get_screenshot_files(request, hack),
    }
    return render_to_response('home.html', c)

def env_variables(request, json_list, nested, variable_type):
    if 'environment' not in request.GET:
        return
    environment = get_environment()
    items = [json["item_id"] for json in json_list]
    # make sure every enriched object has a dict to write into
    for json in json_list:
        if "env_variables" not in json:
            json["env_variables"] = {}
    for (key, user, relationship) in variable_type:
        if not relationship:
            for json, v in zip(json_list, environment.read_more_items(key, items, user)):
                if v:
                    json["env_variables"][key] = v
        else:
            for json, v in zip(json_list, environment.get_items_with_values_more_items(key, items, user)):
                json["env_variables"][key] = dict(v)

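# The enricher above is a no-op unless the API request carries an 'environment'
# GET parameter (hypothetical URL: /api/flashcards/?environment=true). Each
# enriched object then gains an "env_variables" mapping; the keys and values below
# are made up, only the shape (plain variable vs. relationship variable) follows
# the code.
EXAMPLE_ENRICHED_OBJECT = {
    'item_id': 123,
    'env_variables': {
        'answer_count': 12,           # plain variable: single value per item
        'parent': {456: 1, 789: 1},   # relationship variable: dict of related items
    },
}
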
def home(request, hack=None):
    min_hack = '.min' if 'unmin' not in request.GET else ''
    print(min_hack, request.GET.get('unmin', 'HHH'))
    JS_FILES = (
        "dist/js/bower-libs" + min_hack + ".js",
        "dist/js/unminifiable-libs.js",
        "dist/js/anatomy" + min_hack + ".js",
    )
    CSS_FILES = (
        "dist/css/all.min.css",
    )
    if not hasattr(request.user, "userprofile") or request.user.userprofile is None:
        environment = get_environment()
        user = {
            'user': {
                'username': '',
            },
            'number_of_answers': environment.number_of_answers(user=request.user.id) if request.user.id is not None else 0,
            'number_of_correct_answers': environment.number_of_correct_answers(user=request.user.id) if request.user.id is not None else 0,
        }
        email = ''
    else:
        if hack is None:
            return redirect('/overview/')
        user = request.user.userprofile.to_json(stats=True)
        user['subscribed'] = has_active_subscription(request)
        email = request.user.email
        if not request.user.userprofile.public:
            request.user.userprofile.public = True
            request.user.userprofile.save()
    user_json = json.dumps(user)
    stats = {
        'number_of_answers': FlashcardAnswer.objects.count(),
    }
    if hack == 'home':
        hack = None
    categories = [c.to_json() for c in Category.objects.filter(lang=get_language(), active=True)]
    c = {
        # Czech: "Anatom.cz - practising human anatomy in pictures"
        'title': _('Anatom.cz') + ' - ' + _('procvičování anatomie člověka v obrázcích'),
        'headline': get_headline_from_url(hack),
        'is_production': settings.ON_PRODUCTION,
        'css_files': CSS_FILES,
        'js_files': JS_FILES,
        'screenshot_files': get_screenshot_files(request, hack),
        'user_json': user_json,
        'user': user,
        'email': email,
        'LANGUAGE_CODE': get_language(),
        'LANGUAGES': settings.LANGUAGES,
        'LANGUAGE_DOMAINS': settings.LANGUAGE_DOMAINS,
        'is_practice': hack is not None and hack.startswith("practice/"),
        'include_template': get_template(request, hack),
        'hack': hack or '',
        'config_json': json.dumps(get_global_config()),
        'DOMAIN': request.build_absolute_uri('/')[:-1],
        'stats': stats,
        'stats_json': json.dumps(stats),
        'canonical_url': 'https://' + request.META['HTTP_HOST'] + request.get_full_path().split('?')[0].replace('//', '/'),
        'base': '//' + request.META['HTTP_HOST'],
        'canonical_path': request.get_full_path().split('?')[0][1:].replace('//', '/'),
        'categories_json': json.dumps({'data': categories}),
        'show_inspectlet': random.randrange(10) < 1,
    }
    return render_to_response('home.html', c)