Пример #1
0
    def handle(self, *args, **options):
        """Export the channel's content to a Kolibri-format sqlite3 database.

        Reads ``options['channel_id']`` (the channel to export) and
        ``options['force']`` (skip the "nothing changed" early-exit check).
        Logs and reports the database path when exiting early.
        """
        import os  # local import: only needed to close the mkstemp descriptor

        channel_id = options['channel_id']
        force = options['force']

        try:
            channel = ccmodels.Channel.objects.get(pk=channel_id)
            # unless forced, exit early when no nodes in the channel changed
            if not force:
                raise_if_nodes_are_all_unchanged(channel)
            fh, tempdb = tempfile.mkstemp(suffix=".sqlite3")
            # mkstemp returns an open OS-level file descriptor that the caller
            # is responsible for closing; we only need the path, so release
            # the descriptor right away to avoid leaking it.
            os.close(fh)

            with using_content_database(tempdb):
                prepare_export_database(tempdb)
                map_content_tags(channel)
                map_channel_to_kolibri_channel(channel)
                map_content_nodes(channel.main_tree)
                save_export_database(channel_id)
                increment_channel_version(channel)
                mark_all_nodes_as_changed(channel)
                # use SQLite backup API to put DB into archives folder.
                # Then we can use the empty db name to have SQLite use a temporary DB (https://www.sqlite.org/inmemorydb.html)

        except EarlyExit as e:
            logging.warning("Exited early due to {message}.".format(message=e.message))
            self.stdout.write("You can find your database in {path}".format(path=e.db_path))
Пример #2
0
    def handle_network_download(self, *args, **options):
        """Download all of a channel's files over the network, with progress.

        Skips files that already exist locally with the expected size, and
        reports both overall and per-file progress.
        """
        channel_id = options["channel_id"]

        with using_content_database(channel_id):
            files = File.objects.all()
            # Sum() over an empty queryset yields None; default to 0 so the
            # progress bar always receives a numeric total.
            total_bytes_to_download = files.aggregate(Sum('file_size'))['file_size__sum'] or 0

            with self.start_progress(total=total_bytes_to_download) as overall_progress_update:

                for f in files:
                    filename = f.get_filename()
                    url = paths.get_content_storage_file_url(filename)
                    path = paths.get_content_storage_file_path(filename)

                    # if the file already exists with the expected size, count
                    # it toward overall progress and skip the download
                    # TODO(jamalex): could do md5 checks here instead, to be ultra-safe
                    if os.path.isfile(path) and os.path.getsize(path) == f.file_size:
                        overall_progress_update(f.file_size)
                        continue

                    with transfer.FileDownload(url, path) as download:

                        with self.start_progress(total=download.total_size) as file_dl_progress_update:

                            for chunk in download:
                                length = len(chunk)
                                overall_progress_update(length)
                                file_dl_progress_update(length)
Пример #3
0
    def handle(self, *args, **options):
        """Export the channel's content to a Kolibri-format sqlite3 database.

        Reads ``options['channel_id']``, ``options['force']`` (skip the
        "nothing changed" early-exit check) and ``options['force-exercises']``
        (forwarded to the exercise mapping step). Records publish stats on
        success; logs and reports the database path when exiting early.
        """
        import os  # local import: only needed to close the mkstemp descriptor

        channel_id = options['channel_id']
        force = options['force']
        force_exercises = options['force-exercises']

        try:
            channel = ccmodels.Channel.objects.get(pk=channel_id)
            # unless forced, exit early when no nodes in the channel changed
            if not force:
                raise_if_nodes_are_all_unchanged(channel)
            fh, tempdb = tempfile.mkstemp(suffix=".sqlite3")
            # mkstemp returns an open OS-level file descriptor that the caller
            # is responsible for closing; we only need the path, so release
            # the descriptor right away to avoid leaking it.
            os.close(fh)

            with using_content_database(tempdb):
                prepare_export_database(tempdb)
                map_content_tags(channel)
                map_channel_to_kolibri_channel(channel)
                map_content_nodes(channel.main_tree,
                                  channel.language,
                                  force_exercises=force_exercises)
                map_prerequisites(channel.main_tree)
                save_export_database(channel_id)
                increment_channel_version(channel)
                mark_all_nodes_as_changed(channel)
                # use SQLite backup API to put DB into archives folder.
                # Then we can use the empty db name to have SQLite use a temporary DB (https://www.sqlite.org/inmemorydb.html)

            record_publish_stats(channel)

        except EarlyExit as e:
            logging.warning(
                "Exited early due to {message}.".format(message=e.message))
            self.stdout.write(
                "You can find your database in {path}".format(path=e.db_path))
Пример #4
0
 def retrieve(self, request, pk=None, channelmetadata_channel_id=None):
     """Return a single serialized File from the given channel's database."""
     with using_content_database(channelmetadata_channel_id):
         serializer_context = {
             'request': request,
             'channel_id': channelmetadata_channel_id,
         }
         file_obj = models.File.objects.get(pk=pk)
         serializer = serializers.FileSerializer(file_obj, context=serializer_context)
         return Response(serializer.data)
Пример #5
0
 def list(self, request, **kwargs):
     """Return aggregate file count and total size for the requested channel."""
     with using_content_database(kwargs['channel_id']):
         summary = models.File.objects.aggregate(
             total_files=Count('pk'),
             total_file_size=Sum('file_size'),
         )
         summary['channel_id'] = get_active_content_database()
         # Clients fetch this endpoint as a Collection, so wrap the single
         # summary dict in a list.
         return Response([summary])
Пример #6
0
 def get_context_data(self, **kwargs):
     """Inject serialized top-level topics for the active channel into the context."""
     channel_id = getattr(self.request, "channel_id", "dummy_db")
     with using_content_database(channel_id):
         context = super(LearnView, self).get_context_data(**kwargs)
         serializer = ContentNodeSerializer(get_top_level_topics(), many=True)
         serializer.context["channel_id"] = channel_id
         context['topics'] = JSONRenderer().render(serializer.data)
     return context
Пример #7
0
 def get_content_title(self, obj):
     """Look up the title of the referenced content node, or "" if unavailable."""
     try:
         with using_content_database(obj.channel_id):
             node = ContentNode.objects.filter(content_id=obj.content_id).first()
             return node.title if node else ""
     except KeyError:  # content DB doesn't exist
         return ""
Пример #8
0
 def retrieve(self, request, pk=None, channelmetadata_channel_id=None):
     """Return one serialized ContentNode, honouring an optional ?skip=[...] list."""
     skip_preload = []
     if request.method == 'GET' and 'skip' in request.GET:
         # literal_eval safely parses the client-supplied list literal
         skip_preload = ast.literal_eval(request.GET['skip'])
     with using_content_database(channelmetadata_channel_id):
         serializer_context = {
             'request': request,
             'channel_id': channelmetadata_channel_id,
             'skip_preload': skip_preload,
         }
         node = models.ContentNode.objects.get(pk=pk)
         serializer = serializers.ContentNodeSerializer(node, context=serializer_context)
         return Response(serializer.data)
Пример #9
0
    def filter_next_steps(self, queryset, value):
        """
        Recommend uncompleted content, content that has user completed content as a prerequisite.

        :param queryset: all content nodes for this channel
        :param value: id of currently logged in user, or none if user is anonymous
        :return: uncompleted content nodes, or empty queryset if user is anonymous
        """

        # if user is anonymous, don't return any nodes
        if not value:
            return queryset.none()

        if self.data['channel']:
            from kolibri.content.content_db_router import using_content_database
            from kolibri.content.models import ContentNode
            with using_content_database(self.data['channel']):
                tables = [
                    '"{summarylog_table}" AS "complete_log"',
                    '"{summarylog_table}" AS "incomplete_log"',
                    '"{content_table}" AS "complete_node"',
                    '"{content_table}" AS "incomplete_node"',
                ]
                table_names = {
                    "summarylog_table": ContentSummaryLog._meta.db_table,
                    "content_table": ContentNode._meta.db_table,
                }
                # aliases for sql table names
                sql_tables_and_aliases = [
                    table.format(**table_names) for table in tables
                ]
                # where conditions joined by ANDs; the user id is bound with
                # %s query parameters instead of being interpolated into the
                # SQL string, so the value can never cause SQL injection
                where_statements = [
                    "NOT (incomplete_log.progress < 1 AND incomplete_log.content_id = incomplete_node.content_id)",
                    "complete_log.user_id = %s",
                    "incomplete_log.user_id = %s",
                    "complete_log.progress = 1",
                    "complete_node.rght = incomplete_node.lft - 1",
                    "complete_log.content_id = complete_node.content_id"
                ]
                # custom SQL query to get uncompleted content based on mptt algorithm
                next_steps_recommendations = "SELECT incomplete_node.* FROM {tables} WHERE {where}".format(
                    tables=", ".join(sql_tables_and_aliases),
                    where=_join_with_logical_operator(where_statements, "AND"))
                # two placeholders above, in order: complete_log then incomplete_log
                return ContentNode.objects.raw(next_steps_recommendations, [value, value])
        else:
            summary_logs = ContentSummaryLog.objects.filter(
                user=value).exclude(progress=1)

        content_ids = summary_logs.values_list('content_id', flat=True)
        unfinished_nodes = queryset.filter(
            content_id__in=list(content_ids[:10]))

        return unfinished_nodes
Пример #10
0
 def leaves(self, request, channelmetadata_channel_id, *args, **kwargs):
     """
     endpoint for content api method
     leaves(channel_id=None, content=None, **kwargs)
     """
     with using_content_database(channelmetadata_channel_id):
         serializer_context = {'request': request, 'channel_id': channelmetadata_channel_id}
         leaf_nodes = api.leaves(content=self.kwargs['pk'])
         serializer = serializers.ContentNodeSerializer(
             leaf_nodes, context=serializer_context, many=True)
         return Response(serializer.data)
Пример #11
0
 def get_context_data(self, **kwargs):
     """Add the active channel's root node and top-level topics to the context."""
     channel_id = getattr(self.request, "channel_id", "da32c86316b623399732d886af6c7c49")
     context = super(LearnView, self).get_context_data(**kwargs)
     with using_content_database(channel_id):
         root = ContentNode.objects.get(parent__isnull=True)
         serializer_context = {'request': self.request}
         children_serializer = ContentNodeSerializer(
             root.get_children(), context=serializer_context, many=True)
         root_serializer = ContentNodeSerializer(root, context=serializer_context)
         context['nodes'] = JSONRenderer().render(children_serializer.data)
         context['rootnode'] = JSONRenderer().render(root_serializer.data)
     context['kolibri'] = settings.KOLIBRI_CORE_JS_NAME
     context['channel_id'] = channel_id
     return context
Пример #12
0
    def filter_next_steps(self, queryset, value):
        """
        Recommend uncompleted content, content that has user completed content as a prerequisite.

        :param queryset: all content nodes for this channel
        :param value: id of currently logged in user, or none if user is anonymous
        :return: uncompleted content nodes, or empty queryset if user is anonymous
        """

        # if user is anonymous, don't return any nodes
        if not value:
            return queryset.none()

        if self.data['channel']:
            from kolibri.content.content_db_router import using_content_database
            from kolibri.content.models import ContentNode
            with using_content_database(self.data['channel']):
                tables = [
                    '"{summarylog_table}" AS "complete_log"',
                    '"{summarylog_table}" AS "incomplete_log"',
                    '"{content_table}" AS "complete_node"',
                    '"{content_table}" AS "incomplete_node"',
                ]
                table_names = {
                    "summarylog_table": ContentSummaryLog._meta.db_table,
                    "content_table": ContentNode._meta.db_table,
                }
                # aliases for sql table names
                sql_tables_and_aliases = [table.format(**table_names) for table in tables]
                # where conditions joined by ANDs; the user id is bound with
                # %s query parameters instead of being interpolated into the
                # SQL string, so the value can never cause SQL injection
                where_statements = ["NOT (incomplete_log.progress < 1 AND incomplete_log.content_id = incomplete_node.content_id)",
                                    "complete_log.user_id = %s",
                                    "incomplete_log.user_id = %s",
                                    "complete_log.progress = 1",
                                    "complete_node.rght = incomplete_node.lft - 1",
                                    "complete_log.content_id = complete_node.content_id"]
                # custom SQL query to get uncompleted content based on mptt algorithm
                next_steps_recommendations = "SELECT incomplete_node.* FROM {tables} WHERE {where}".format(
                    tables=", ".join(sql_tables_and_aliases),
                    where=_join_with_logical_operator(where_statements, "AND")
                )
                # two placeholders above, in order: complete_log then incomplete_log
                return ContentNode.objects.raw(next_steps_recommendations, [value, value])
        else:
            summary_logs = ContentSummaryLog.objects.filter(user=value).exclude(progress=1)

        content_ids = summary_logs.values_list('content_id', flat=True)
        unfinished_nodes = queryset.filter(content_id__in=list(content_ids[:10]))

        return unfinished_nodes
Пример #13
0
    def get_context_data(self, **kwargs):
        """Build the Learn page context: serialized channel list, the active
        channel id (from the "currentChannel" cookie when valid), and the
        active channel's root node plus its top-level topics."""
        context = super(LearnView, self).get_context_data(**kwargs)
        context['channelList'] = []
        context['channel_id'] = ''
        context['nodes'] = []
        context['rootnode'] = []

        channels = ChannelMetadataCache.objects.all()
        if not channels:
            return context
        else:
            channel_serializer = ChannelMetadataCacheSerializer(channels,
                                                                many=True)
            channel_list = JSONRenderer().render(channel_serializer.data)
            context['channelList'] = channel_list

            cookie_current_channel = self.request.COOKIES.get("currentChannel")
            # reuse the queryset fetched above rather than querying again
            channelExists = any(
                channel.id == cookie_current_channel for channel in channels)
            if (cookie_current_channel is not None) and channelExists:
                channel_id = cookie_current_channel
            else:
                channel_id = channels.first().id

            context['channel_id'] = channel_id

            try:
                with using_content_database(channel_id):
                    root_node = ContentNode.objects.get(parent__isnull=True)
                    top_level_nodes = root_node.get_children()
                    mcontext = {'request': self.request}
                    topics_serializer = ContentNodeSerializer(top_level_nodes,
                                                              context=mcontext,
                                                              many=True)
                    root_node_serializer = ContentNodeSerializer(
                        root_node, context=mcontext)
                    context['nodes'] = JSONRenderer().render(
                        topics_serializer.data)
                    context['rootnode'] = JSONRenderer().render(
                        root_node_serializer.data)
            except OperationalError as e:
                # pass the exception as a lazy %-style argument; the original
                # call passed it as an extra positional arg with no placeholder,
                # which triggers a logging formatting error instead of logging it
                logging.debug('Database error while loading content data: %s', e)

        return context
Пример #14
0
    def handle_network_download(self, *args, **options):
        """Download a channel's files from the central content server, with
        overall and per-file progress reporting."""
        channel_id = options["channel_id"]

        content_download_url_template = os.path.join(
            settings.CENTRAL_CONTENT_DOWNLOAD_DOMAIN,
            "(unknown)",
        )

        with using_content_database(channel_id):
            files = File.objects.all()
            # Sum() over an empty queryset yields None; default to 0 so the
            # progress bar always receives a numeric total.
            total_bytes_to_download = files.aggregate(
                Sum('file_size'))['file_size__sum'] or 0

            with self.start_progress(
                    total=total_bytes_to_download) as overall_progress_update:

                for f in files:
                    url = content_download_url_template.format(
                        filename=f.get_url())
                    path = CONTENT_DEST_PATH_TEMPLATE.format(
                        filename=f.get_url())

                    try:
                        filedir = os.path.dirname(path)
                        os.makedirs(filedir)
                    except OSError:  # directories already exist
                        pass

                    r = requests.get(url, stream=True)
                    try:
                        r.raise_for_status()
                        contentlength = int(r.headers['content-length'])

                        with self.start_progress(
                                total=contentlength) as file_dl_progress_update:

                            with open(path, "wb") as destfileobj:

                                for content in r.iter_content(1000):
                                    length = len(content)

                                    destfileobj.write(content)

                                    overall_progress_update(length)
                                    file_dl_progress_update(length)
                    finally:
                        # release the streamed connection back to the pool even
                        # when raise_for_status or the write loop fails
                        r.close()
Пример #15
0
 def get_context_data(self, **kwargs):
     """Add the active channel's root node and top-level topics to the context."""
     channel_id = getattr(self.request, "channel_id",
                          "da32c86316b623399732d886af6c7c49")
     context = super(LearnView, self).get_context_data(**kwargs)
     with using_content_database(channel_id):
         root = ContentNode.objects.get(parent__isnull=True)
         serializer_context = {'request': self.request}
         children_serializer = ContentNodeSerializer(root.get_children(),
                                                     context=serializer_context,
                                                     many=True)
         root_serializer = ContentNodeSerializer(root, context=serializer_context)
         context['nodes'] = JSONRenderer().render(children_serializer.data)
         context['rootnode'] = JSONRenderer().render(root_serializer.data)
     context['kolibri'] = settings.KOLIBRI_CORE_JS_NAME
     context['channel_id'] = channel_id
     return context
Пример #16
0
    def get_context_data(self, **kwargs):
        """Build the Learn page context: serialized channel list, the active
        channel id (from the "currentChannel" cookie when valid), and the
        active channel's root node plus its top-level topics."""
        context = super(LearnView, self).get_context_data(**kwargs)
        context["channelList"] = []
        context["channel_id"] = ""
        context["nodes"] = []
        context["rootnode"] = []

        channels = ChannelMetadataCache.objects.all()
        if not channels:
            return context
        else:
            channel_serializer = ChannelMetadataCacheSerializer(channels, many=True)
            channel_list = JSONRenderer().render(channel_serializer.data)
            context["channelList"] = channel_list

            cookie_current_channel = self.request.COOKIES.get("currentChannel")
            # reuse the queryset fetched above rather than querying again
            channelExists = any(
                channel.id == cookie_current_channel for channel in channels)
            if (cookie_current_channel is not None) and channelExists:
                channel_id = cookie_current_channel
            else:
                channel_id = channels.first().id

            context["channel_id"] = channel_id

            try:
                with using_content_database(channel_id):
                    root_node = ContentNode.objects.get(parent__isnull=True)
                    top_level_nodes = root_node.get_children()
                    mcontext = {"request": self.request}
                    topics_serializer = ContentNodeSerializer(top_level_nodes, context=mcontext, many=True)
                    root_node_serializer = ContentNodeSerializer(root_node, context=mcontext)
                    context["nodes"] = JSONRenderer().render(topics_serializer.data)
                    context["rootnode"] = JSONRenderer().render(root_node_serializer.data)
            except OperationalError as e:
                # pass the exception as a lazy %-style argument; the original
                # call passed it as an extra positional arg with no placeholder,
                # which triggers a logging formatting error instead of logging it
                logging.debug("Database error while loading content data: %s", e)

        return context
Пример #17
0
    def get_context_data(self, **kwargs):
        """Populate the Learn context with the channel list, the active
        channel id (cookie-selected when valid), and that channel's tree."""
        context = super(LearnView, self).get_context_data(**kwargs)
        context['kolibri'] = settings.KOLIBRI_CORE_JS_NAME
        context['channelList'] = []
        context['channel_id'] = ''
        context['nodes'] = []
        context['rootnode'] = []

        channels = ChannelMetadataCache.objects.all()
        if not channels:
            return context

        context['channelList'] = JSONRenderer().render(
            ChannelMetadataCacheSerializer(channels, many=True).data)

        cookie_current_channel = self.request.COOKIES.get("currentChannel")
        known_channel = any(
            channel.id == cookie_current_channel
            for channel in ChannelMetadataCache.objects.all())
        if (cookie_current_channel is not None) and known_channel:
            channel_id = cookie_current_channel
        else:
            channel_id = ChannelMetadataCache.objects.first().id

        context['channel_id'] = channel_id

        with using_content_database(channel_id):
            root_node = ContentNode.objects.get(parent__isnull=True)
            serializer_context = {'request': self.request}
            topics_serializer = ContentNodeSerializer(
                root_node.get_children(), context=serializer_context, many=True)
            root_serializer = ContentNodeSerializer(root_node, context=serializer_context)
            context['nodes'] = JSONRenderer().render(topics_serializer.data)
            context['rootnode'] = JSONRenderer().render(root_serializer.data)

        return context
Пример #18
0
 def list(self, request, channelmetadata_channel_id=None):
     """List all content nodes in the channel, filtered by query parameters."""
     with using_content_database(channelmetadata_channel_id):
         nodes = models.ContentNode.objects.all()
         filtered = ContentNodeFilter(request.GET, queryset=nodes)
         serializer_context = {'request': request, 'channel_id': channelmetadata_channel_id}
         serializer = serializers.ContentNodeSerializer(
             filtered, context=serializer_context, many=True)
         return Response(serializer.data)
Пример #19
0
 def retrieve(self, request, pk=None, channel_id=None):
     """Return the metadata record for one channel from the default database."""
     with using_content_database("default"):
         record = models.ChannelMetadata.objects.get(channel_id=channel_id)
         serializer = serializers.ChannelMetadataSerializer(
             record, context={'request': request})
         return Response(serializer.data)
Пример #20
0
 def list(self, request, channel_pk=None):
     """Return metadata records for every channel in the default database."""
     with using_content_database("default"):
         serializer = serializers.ChannelMetadataSerializer(
             models.ChannelMetadata.objects.all(),
             context={'request': request},
             many=True,
         )
         return Response(serializer.data)
Пример #21
0
 def list(self, request, channelmetadata_channel_id=None):
     """List every file record in the given channel's database."""
     with using_content_database(channelmetadata_channel_id):
         serializer_context = {'request': request, 'channel_id': channelmetadata_channel_id}
         serializer = serializers.FileSerializer(
             models.File.objects.all(), context=serializer_context, many=True)
         return Response(serializer.data)