def get_context_data(self, **kwargs):
    """Build the template context for a single-item ("view item") page.

    Besides the item itself, this reconstructs the index (listing) query the
    user came from — persisted client-side in the "_i" cookie as an
    url-encoded query string — so the page can render "previous item",
    "next item" and "back to index" links that match the user's filters,
    sort order and batch position.

    Requires on ``self``: ``item``, ``request``, ``model``, ``content_type``
    and ``view_item_name`` (set by the enclosing view class — not visible in
    this chunk).
    """
    data = super(BaseViewItemMixin, self).get_context_data(**kwargs)
    item = self.item
    request = self.request
    model = self.model
    content_type = self.content_type
    data["item"] = item
    data["content_type"] = content_type
    microsite = None
    came_from_index = False
    prev_item_url = u""
    next_item_url = u""
    index_url = u""
    # ``kwargs`` is deliberately reused below to hold the URL kwargs of the
    # index page resolved from the cookie (the incoming **kwargs were already
    # consumed by super() above).
    kwargs = {}
    filters = {}
    index_path = None
    if "_i" in request.COOKIES:
        # The cookie value is an url-quoted query string of the index page's
        # filters. dict(QueryDict(...)) yields list values (MultiValueDict
        # internals), hence the list check on index_path below.
        filters = dict(QueryDict(urllib.unquote(request.COOKIES["_i"])))
        #noinspection PyArgumentEqualDefault
        index_path = filters.pop("index_path", None)
        if index_path and isinstance(index_path, list):
            index_path = index_path[0]
    if index_path:
        try:
            # resolve() returns (view, args, kwargs); [2] keeps only the URL
            # kwargs of the index page (model, microsite, path filter value).
            kwargs = resolve(index_path)[2]
            came_from_index = True
        except Http404:
            # Stale/invalid cookie path — silently fall back to a plain
            # item page without index navigation.
            pass
    if came_from_index:
        # Re-run the same search the index page ran, in the same order, so
        # the item's neighbours here match what the user saw in the listing.
        query = SearchQuerySet().narrow("is_displayed:true")
        index_model = kwargs.get("model")
        microsite = kwargs.get("microsite")
        if microsite:
            microsite = Microsite.objects.get(slug=microsite)
        if index_model:
            query = query.models(index_model)
        path_filter = None
        query_string_params = {}
        search_query = u""
        # At most one path filter can be encoded in the index URL — stop at
        # the first one found.
        for filter_name in PATH_FILTERS:
            value = kwargs.get(filter_name)
            if value is not None:
                filter = FILTERS[filter_name]
                query = filter.update_query(query, value)
                path_filter = filter_name
                break
        # DummyRequest adapts the cookie's filter dict to the request-like
        # interface that FILTERS[...].extract_value() expects.
        dummy_request = DummyRequest(filters)
        for filter_name, filter in FILTERS.items():
            if filter_name == path_filter:
                # Already applied via the URL path above.
                continue
            value = filter.extract_value(dummy_request)
            if value is not None:
                query = filter.update_query(query, value)
                query_string_params = filter.update_query_string_params(query_string_params, value)
                if filter_name == "search":
                    search_query = value
        index_params = IndexParams(dummy_request, search_query=search_query)
        query_string_params = index_params.update_query_string_params(query_string_params)
        if index_params.query_order_by is not None:
            query = query.order_by(index_params.query_order_by)
        # Linear scan of the result list to locate the current item and pick
        # its neighbours. NOTE(review): O(position) per page view — fine for
        # small indexes, confirm acceptable for large result sets.
        current_item_idx = 0
        item_found = False
        for result in query:
            if result.model == model and int(result.pk) == item.id:
                if current_item_idx > 0:
                    prev_item = query[current_item_idx - 1]
                    namespace = getattr(prev_item.model, "namespace", None)
                    if namespace:
                        prev_item_url = reverse("materials:%s:%s" % (namespace, self.view_item_name), kwargs=dict(slug=prev_item.get_stored_fields()["slug"]))
                    else:
                        prev_item_url = prev_item.object.get_absolute_url()
                if current_item_idx < (len(query) - 1):
                    next_item = query[current_item_idx + 1]
                    namespace = getattr(next_item.model, "namespace", None)
                    if namespace:
                        next_item_url = reverse("materials:%s:%s" % (namespace, self.view_item_name), kwargs=dict(slug=next_item.get_stored_fields()["slug"]))
                    else:
                        next_item_url = next_item.object.get_absolute_url()
                item_found = True
                break
            current_item_idx += 1
        # Python 2 integer division: snap the item's position down to the
        # start of its batch so "back to index" lands on the right page.
        batch_start = (current_item_idx / index_params.batch_size) * index_params.batch_size
        if batch_start:
            query_string_params["batch_start"] = batch_start
        if item_found:
            index_url = index_path + serialize_query_string_params(query_string_params)
    data["microsite"] = microsite
    data["came_from_index"] = came_from_index
    data["index_url"] = index_url
    data["prev_item_url"] = prev_item_url
    data["next_item_url"] = next_item_url
    if came_from_index:
        # Raw cookie value is re-exposed so the template can keep it alive.
        data["index_cookie"] = request.COOKIES.get("_i")
    if request.user.is_authenticated():
        # Per-user state: whether the item is saved, plus save/unsave URLs.
        data["saved"] = SavedItem.objects.filter(
            content_type=content_type,
            object_id=item.id,
            user=request.user
        ).exists()
        data["save_url"] = reverse("materials:%s:save_item" % item.namespace, kwargs=dict(slug=item.slug))
        data["unsave_url"] = reverse("materials:%s:unsave_item" % item.namespace, kwargs=dict(slug=item.slug))
    data["comment_url"] = reverse(
        "reviews:review",
        kwargs=dict(content_type_id=content_type.id, object_id=item.id)
    )
    data["add_tags_url"] = reverse("tags:add_tags", args=(
        content_type.app_label,
        content_type.model,
        item.id,
    ))
    return data
def index(
    request,
    general_subjects=None,
    grade_levels=None,
    course_material_types=None,
    library_material_types=None,
    collection=None,
    keywords=None,
    license=None,
    course_or_module=None,
    community_types=None,
    community_topics=None,
    microsite=None,
    model=None,
    search=False,
    tags=None,
    subjects=None,
    format=None,
    topics=None,
    alignment=None,
    facet_fields=None,
):
    """Main materials listing view: browse, search, microsite and feed pages.

    The keyword parameters (``general_subjects`` … ``alignment``) are path
    filter values injected by the URLconf; they are looked up dynamically via
    ``locals()[filter_name]`` below, so their names must match the keys of
    ``PATH_FILTERS``.

    WARNING: the template context is built from ``locals()`` (see the
    ``direct_to_template``/``render_to_string`` calls), so every local
    variable name in this function is part of the template contract — do not
    rename locals without checking the templates.

    Renders one of several output formats: "html" (default), "rss", "json",
    "xml" or "csv" (staff only).
    """
    if not facet_fields:
        facet_fields = [
            "general_subjects",
            "grade_levels",
            "keywords",
            "course_material_types",
            "media_formats",
            "cou_bucket",
            "indexed_topics",
        ]
    if model:
        index_namespace = model.namespace
    else:
        index_namespace = None
    if tags or subjects:
        # Tags and subjects are old path filters which are combined to
        # keywords filter now.
        # Redirect to keyword index.
        keywords = tags or subjects
        if index_namespace:
            url = reverse("materials:%s:keyword_index" % index_namespace, kwargs=dict(keywords=keywords))
        else:
            url = reverse("materials:keyword_index", kwargs=dict(keywords=keywords))
        return HttpResponsePermanentRedirect(url)
    if keywords:
        slugified_keywords = slugify(keywords)
        if not slugified_keywords:
            raise Http404()
        if slugified_keywords != keywords:
            # Keywords should be slugified.
            # Redirect to keyword index with slugified keyword.
            if index_namespace:
                url = reverse("materials:%s:keyword_index" % index_namespace, kwargs=dict(keywords=slugified_keywords))
            else:
                url = reverse("materials:keyword_index", kwargs=dict(keywords=slugified_keywords))
            return HttpResponsePermanentRedirect(url)
    query_string_params = {}
    filter_values = {}
    page_title = u"Browse"
    page_subtitle = u""
    breadcrumbs = [{"url": reverse("materials:browse"), "title": u"OER Materials"}]
    if not format:
        # No explicit format from the URLconf: default to HTML, but honour
        # the legacy ?feed=yes / ?csv=yes query parameters.
        # NOTE(review): request.REQUEST is the deprecated GET+POST merge
        # (removed in Django 1.9) — this file targets an old Django/Python 2.
        format = "html"
        if request.REQUEST.get("feed", None) == "yes":
            format = "rss"
        elif request.REQUEST.get("csv", None) == "yes":
            # CSV export is restricted to staff users.
            if not request.user.is_authenticated() or not request.user.is_staff:
                raise Http404()
            format = "csv"
    query = SearchQuerySet().narrow("is_displayed:true")
    if model:
        query = query.models(model)
    path_filter = None
    hidden_filters = {}
    # Apply filters encoded in the URL path. The filter value arrives as the
    # like-named keyword argument, fetched dynamically from locals().
    for filter_name in PATH_FILTERS:
        value = locals()[filter_name]
        if value is not None:
            filter = FILTERS[filter_name]
            query = filter.update_query(query, value)
            path_filter = filter_name
            if page_subtitle:
                page_subtitle = u"%s → %s" % (page_subtitle, filter.page_subtitle(value))
            else:
                page_subtitle = filter.page_subtitle(value)
            filter_values[filter_name] = value
    visible_filters = [
        "search",
        "general_subjects",
        "grade_levels",
        "course_material_types",
        "media_formats",
        "cou_bucket",
    ]
    if microsite:
        # Replace the slug from the URL with the actual Microsite object.
        microsite = Microsite.objects.get(slug=microsite)
        visible_filters.append("topics")
    search_query = u""
    # Apply the remaining filters from the query string; anything not shown
    # in the sidebar is carried along as a hidden form field.
    for filter_name, filter in FILTERS.items():
        if filter_name == path_filter:
            # Already applied from the URL path above.
            continue
        value = filter.extract_value(request)
        if value is not None:
            query = filter.update_query(query, value)
            query_string_params = filter.update_query_string_params(query_string_params, value)
            filter_values[filter_name] = value
            if filter_name not in visible_filters:
                hidden_filters[filter.request_name] = value
            if filter_name == "search":
                search_query = value
    if search:
        if not search_query:
            # Search page hit without a search term.
            if filter_values:
                # Other filters present: treat it as a plain filtered index.
                return HttpResponsePermanentRedirect(
                    reverse("materials:index") + serialize_query_string_params(query_string_params)
                )
            else:
                messages.warning(request, u"You should specify the search term")
                return HttpResponsePermanentRedirect(reverse("materials:advanced_search"))
        page_title = u"Search Results"
        page_subtitle = search_query
        breadcrumbs = [{"url": reverse("materials:search"), "title": page_title}]
    elif model == CommunityItem:
        breadcrumbs = [{"url": reverse("materials:community"), "title": u"OER Community"}]
    if microsite:
        # Microsite pages get their own breadcrumb root, overriding the above.
        breadcrumbs = [
            {
                "url": reverse("materials:microsite", kwargs=dict(microsite=microsite.slug)),
                "title": u"%s Home" % microsite.name,
            }
        ]
    if not page_subtitle and model:
        page_subtitle = u"Content Type: %s" % model._meta.verbose_name_plural
    elif not page_subtitle and filter_values:
        # NOTE(review): Python 2 idiom — dict.keys()[0] picks an arbitrary
        # filter to label the page with; not valid on Python 3.
        filter_name = filter_values.keys()[0]
        filter = FILTERS[filter_name]
        page_subtitle = filter.page_subtitle(filter_values[filter_name])
    index_params = IndexParams(request, format, search_query)
    query_string_params = index_params.update_query_string_params(query_string_params)
    # Canonical URL of this listing without pagination.
    index_url = request.path + serialize_query_string_params(query_string_params, ignore_params=["batch_start"])
    if page_subtitle:
        index_title = u"%s: %s" % (page_title, page_subtitle)
    else:
        index_title = page_title
    # Python 2 idiom: dict(d.items() + [...]) merges without mutating d.
    feed_url = request.path + serialize_query_string_params(
        dict(query_string_params.items() + [("feed", "yes")]), ignore_params=["batch_start"]
    )
    csv_url = request.path + serialize_query_string_params(
        dict(query_string_params.items() + [("csv", "yes")]), ignore_params=["batch_start"]
    )
    batch_end = index_params.batch_start + index_params.batch_size
    # Special-case orderings for single-filter "featured"/"evaluated" views;
    # otherwise fall back to the user-selected sort.
    if len(filter_values) == 1 and "featured" in filter_values:
        query = query.order_by("-featured_on")
    elif len(filter_values) == 1 and "evaluated_rubrics" in filter_values:
        query = query.order_by("-evaluation_score_rubric_%i" % filter_values["evaluated_rubrics"][0])
    elif index_params.query_order_by is not None:
        query = query.order_by(index_params.query_order_by)
    if index_params.sort_by == "visits" and not filter_values:
        # Unfiltered "most visited" view: hide never-visited items.
        query = query.narrow("visits:[1 TO *]")
    items = []
    if format == "html":
        for facet_field in facet_fields:
            query = query.facet(facet_field)
        total_items = len(query)
        if total_items and index_params.batch_start >= total_items:
            # Pagination ran past the end — bounce back to page one.
            return HttpResponsePermanentRedirect(index_url)
        results = query[index_params.batch_start:batch_end]
        for result in results:
            # Haystack may return None for stale index entries.
            if result is None:
                continue
            items.append(populate_item_from_search_result(result))
        pagination = Pagination(
            request.path, query_string_params, index_params.batch_start, index_params.batch_size, total_items
        )
        facets = query.facet_counts().get("fields", {})
        index_filters = build_index_filters(visible_filters, facets, filter_values, path_filter, microsite)
        all_keywords = query.count() and facets.get("keywords", []) or []
        # Build the keyword tag clouds: a "top" cloud capped at
        # MAX_TOP_KEYWORDS, plus the full cloud only when it overflows.
        if len(all_keywords) > MAX_TOP_KEYWORDS:
            top_keywords = get_tag_cloud(dict(all_keywords[:MAX_TOP_KEYWORDS]), 3, 0, 0)
            all_keywords = get_tag_cloud(dict(all_keywords), 3, 0, 0)
        else:
            top_keywords = get_tag_cloud(dict(all_keywords), 3, 0, 0)
            all_keywords = []
        # Resolve display names for keyword slugs: Keyword first, then Tag,
        # falling back to the raw slug.
        for keyword in top_keywords:
            name = (
                get_name_from_slug(Keyword, keyword["slug"])
                or get_name_from_slug(Tag, keyword["slug"])
                or keyword["slug"]
            )
            keyword["name"] = name
        for keyword in all_keywords:
            name = (
                get_name_from_slug(Keyword, keyword["slug"])
                or get_name_from_slug(Tag, keyword["slug"])
                or keyword["slug"]
            )
            keyword["name"] = name
        if request.is_ajax():
            # AJAX pagination: render only the item list and return JSON.
            output = render_to_string("materials/include/index-items.html", RequestContext(request, locals()))
            data = dict(
                items=output,
                first_item_number=pagination.first_item_number,
                last_item_number=pagination.last_item_number,
                total_items=pagination.total_items,
                page_title=unicode(page_title),
                # NOTE(review): the inner `or u""` is dead — guarded by the
                # leading `and` — kept as-is.
                page_subtitle=page_subtitle and unicode(page_subtitle or u""),
            )
            return JsonResponse(data)
        return direct_to_template(request, "materials/index.html", locals())
    elif format == "rss":
        # RSS feed: first 20 results only, no pagination.
        results = query[0:20]
        for result in results:
            if result is None:
                continue
            item = result.get_stored_fields()
            if item.get("general_subjects"):
                item["general_subjects"] = [get_name_from_id(GeneralSubject, id) for id in item["general_subjects"]]
            namespace = getattr(result.model, "namespace", None)
            if namespace:
                item["get_absolute_url"] = reverse("materials:%s:view_item" % namespace, kwargs=dict(slug=item["slug"]))
            else:
                item["get_absolute_url"] = result.object.get_absolute_url()
            item["model_verbose_name"] = result.model._meta.verbose_name_plural
            items.append(item)
        return direct_to_template(request, "materials/index-rss.xml", locals(), "text/xml")
    elif format == "json":
        # JSON API: paginated batch built entirely from stored index fields
        # (no database hits per result).
        results = query[index_params.batch_start:batch_end]
        for result in results:
            if result is None:
                continue
            data = result.get_stored_fields()
            item = {
                "id": result.id,
                "title": data["title"],
                "abstract": data["abstract"],
                "url": data["url"],
                "keywords": data["keywords_names"],
                "subject": [get_slug_from_id(GeneralSubject, id) for id in (data["general_subjects"] or [])],
                "grade_level": [get_slug_from_id(GradeLevel, id) for id in (data["grade_levels"] or [])],
                "collection": data["collection"] and get_name_from_id(Collection, data["collection"]) or None,
            }
            items.append(item)
        return JsonResponse(items)
    elif format == "xml":
        # XML export: needs license/keyword/tag data from the database, so
        # load_all() prefetches the model objects for the batch.
        query = query.load_all()
        results = query[index_params.batch_start:batch_end]
        for result in results:
            if result is None:
                continue
            object = result.object
            data = result.get_stored_fields()
            item = {"url": data["url"], "title": data["title"]}
            if data.get("authors"):
                # Only the first author is exported.
                item["author"] = data["authors"][0]
            if data.get("institution"):
                item["institution"] = get_name_from_id(Institution, data["institution"])
            item["abstract"] = data["abstract"]
            license = object.license
            item["copyright_holder"] = license.copyright_holder
            item["license_url"] = license.url
            item["license_name"] = license.name
            item["license_description"] = license.description
            item["license_type"] = license.type
            item["cou_bucket"] = license.bucket
            if data["rating"]:
                item["rating"] = "%.1f" % data["rating"]
            # "fields" is a list of filterable attributes, each carrying the
            # request param name plus slug values and human-readable content.
            item["fields"] = []
            grade_levels = data.get("grade_levels")
            if grade_levels:
                item["fields"].append(
                    dict(
                        title=u"Grade Level",
                        param=FILTERS["grade_levels"].request_name,
                        value=u",".join([get_slug_from_id(GradeLevel, id) for id in grade_levels]),
                        content=u",".join([get_name_from_id(GradeLevel, id) for id in grade_levels]),
                    )
                )
            general_subjects = data.get("general_subjects")
            if general_subjects:
                item["fields"].append(
                    dict(
                        title=u"Subject",
                        param=FILTERS["general_subjects"].request_name,
                        value=u",".join([get_slug_from_id(GeneralSubject, id) for id in general_subjects]),
                        content=u",".join([get_name_from_id(GeneralSubject, id) for id in general_subjects]),
                    )
                )
            collection = data.get("collection")
            if collection:
                item["fields"].append(
                    dict(
                        title=u"Collection",
                        param=FILTERS["collection"].request_name,
                        value=get_slug_from_id(Collection, collection),
                        content=get_name_from_id(Collection, collection),
                    )
                )
            geographic_relevance = data.get("geographic_relevance")
            if geographic_relevance:
                item["fields"].append(
                    dict(
                        title=u"Geographic Regional Relevance",
                        param=FILTERS["geographic_relevance"].request_name,
                        value=u",".join([get_slug_from_id(GeographicRelevance, id) for id in geographic_relevance]),
                        content=u",".join([get_name_from_id(GeographicRelevance, id) for id in geographic_relevance]),
                    )
                )
            keywords = object.keywords.values("slug", "name")
            if keywords:
                item["fields"].append(
                    dict(
                        title=u"Keywords",
                        param=FILTERS["keywords"].request_name,
                        value=u",".join([k["slug"] for k in keywords]),
                        content=u",".join([k["name"] for k in keywords]),
                    )
                )
            # Tags share the "keywords" request param with keywords above.
            tags = object.tags.values("slug", "name").order_by("slug").distinct()
            if tags:
                item["fields"].append(
                    dict(
                        title=u"Tags",
                        param=FILTERS["keywords"].request_name,
                        value=u",".join([k["slug"] for k in tags]),
                        content=u",".join([k["name"] for k in tags]),
                    )
                )
            items.append(item)
        return direct_to_template(request, "materials/index-xml.xml", locals(), "text/xml")
    elif format == "csv":
        return csv_export(query, index_title)