Example 1
    def channeldiffstats(self, request):
        job_metadata = {}
        channel_id = request.data.get("channel_id")
        method = request.data.get("method")
        drive_id = request.data.get("drive_id")
        baseurl = request.data.get("baseurl")

        # request validation and job metadata info
        if not channel_id:
            raise serializers.ValidationError("The channel_id field is required.")
        if not method:
            raise serializers.ValidationError("The method field is required.")

        if method == "network":
            baseurl = baseurl or conf.OPTIONS["Urls"]["CENTRAL_CONTENT_BASE_URL"]
            job_metadata["baseurl"] = baseurl
            # get channel version metadata
            url = get_channel_lookup_url(baseurl=baseurl, identifier=channel_id)
            resp = requests.get(url)
            channel_metadata = resp.json()
            job_metadata["new_channel_version"] = channel_metadata[0]["version"]
        elif method == "disk":
            if not drive_id:
                raise serializers.ValidationError(
                    "The drive_id field is required when using 'disk' method."
                )
            job_metadata = _add_drive_info(job_metadata, request.data)
            # get channel version metadata
            drive = get_mounted_drive_by_id(drive_id)
            channel_metadata = read_channel_metadata_from_db_file(
                get_content_database_file_path(channel_id, drive.datafolder)
            )
            job_metadata["new_channel_version"] = channel_metadata.version
        else:
            raise serializers.ValidationError(
                "'method' field should either be 'network' or 'disk'."
            )

        job_metadata.update(
            {
                "type": "CHANNELDIFFSTATS",
                "started_by": request.user.pk,
                "channel_id": channel_id,
            }
        )

        job_id = priority_queue.enqueue(
            diff_stats,
            channel_id,
            method,
            drive_id=drive_id,
            baseurl=baseurl,
            extra_metadata=job_metadata,
            track_progress=False,
            cancellable=True,
        )

        resp = _job_to_response(priority_queue.fetch_job(job_id))

        return Response(resp)
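For context, a minimal client-side sketch of how this endpoint might be invoked. The route and authentication are assumptions (they depend on how the viewset is registered); the payload fields mirror the validation above.

import requests

# Hypothetical route; adjust to wherever this viewset is mounted.
TASKS_URL = "http://localhost:8080/api/tasks/tasks"
session = requests.Session()
# ... authenticate the session as a user with task permissions (assumed) ...

# "network" method: baseurl is optional and falls back to CENTRAL_CONTENT_BASE_URL.
resp = session.post(
    TASKS_URL + "/channeldiffstats/",
    json={"channel_id": "0" * 32, "method": "network"},  # placeholder channel id
)

# "disk" method: drive_id is required, as enforced by the validation above.
resp = session.post(
    TASKS_URL + "/channeldiffstats/",
    json={"channel_id": "0" * 32, "method": "disk", "drive_id": "local-drive-1"},
)
print(resp.json())  # the serialized job, as returned by _job_to_response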
Example 2
    def exportuserstocsv(self, request):
        """
        Export users, classes, roles and role assignments to a CSV file.

        :returns: An object with the job information
        """
        facility = request.user.facility.id
        job_type = "EXPORTUSERSTOCSV"
        job_metadata = {
            "type": job_type,
            "started_by": request.user.pk,
            "facility": facility,
        }
        locale = get_language_from_request(request)

        job_id = priority_queue.enqueue(
            call_command,
            "bulkexportusers",
            facility=facility,
            locale=locale,
            overwrite="true",
            extra_metadata=job_metadata,
            track_progress=True,
        )

        resp = _job_to_response(priority_queue.fetch_job(job_id))

        return Response(resp)
Example 3
    def startexportlogcsv(self, request):
        """
        Dump the requested logs in CSV format.
        By default the content summary log is dumped.

        :param logtype: kind of log to dump, "summary" or "session"
        :param facility: id of the facility whose logs will be exported
        :returns: An object with the job information
        """
        facility_id = request.data.get("facility", None)
        if facility_id:
            facility = Facility.objects.get(pk=facility_id)
        else:
            facility = request.user.facility

        log_type = request.data.get("logtype", "summary")
        if log_type in CSV_EXPORT_FILENAMES:
            logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export")
            filepath = os.path.join(
                logs_dir,
                # use facility.id rather than the raw request value, which is
                # None when falling back to the requesting user's facility
                CSV_EXPORT_FILENAMES[log_type].format(facility.name, facility.id[:4]),
            )
        else:
            raise Http404(
                "Impossible to create a csv export file for {}".format(log_type)
            )
        if not os.path.isdir(logs_dir):
            os.mkdir(logs_dir)

        job_type = (
            "EXPORTSUMMARYLOGCSV" if log_type == "summary" else "EXPORTSESSIONLOGCSV"
        )

        job_metadata = {
            "type": job_type,
            "started_by": request.user.pk,
            "facility": facility.id,
        }

        job_id = priority_queue.enqueue(
            call_command,
            "exportlogs",
            log_type=log_type,
            output_file=filepath,
            facility=facility.id,
            overwrite="true",
            extra_metadata=job_metadata,
            track_progress=True,
        )

        resp = _job_to_response(priority_queue.fetch_job(job_id))

        return Response(resp)
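This variant assumes a CSV_EXPORT_FILENAMES mapping whose values are format strings taking the facility name and a short prefix of the facility id. A plausible shape, shown here as an assumption rather than the actual Kolibri constant:

# Hypothetical shape of CSV_EXPORT_FILENAMES; the real constant lives elsewhere
# in the codebase and may differ. Each value receives (facility.name, facility.id[:4]).
CSV_EXPORT_FILENAMES = {
    "session": "{}_{}_content_session_logs.csv",
    "summary": "{}_{}_content_summary_logs.csv",
}

print(CSV_EXPORT_FILENAMES["summary"].format("My Facility", "95a5"))
# My Facility_95a5_content_summary_logs.csv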
Example 4
    def startdiskchannelimport(self, request):
        task = validate_local_import_task(request, request.data)

        task.update({"type": "DISKCHANNELIMPORT"})

        job_id = priority_queue.enqueue(
            call_command,
            "importchannel",
            "disk",
            task["channel_id"],
            task["datafolder"],
            drive_id=task["drive_id"],
            extra_metadata=task,
            cancellable=True,
        )

        resp = _job_to_response(priority_queue.fetch_job(job_id))
        return Response(resp)
Example 5
    def startremotechannelimport(self, request):
        task = validate_remote_import_task(request, request.data)

        task.update({"type": "REMOTECHANNELIMPORT"})

        job_id = priority_queue.enqueue(
            call_command,
            "importchannel",
            "network",
            task["channel_id"],
            baseurl=task["baseurl"],
            peer_id=task["peer_id"],
            extra_metadata=task,
            cancellable=True,
        )
        resp = _job_to_response(priority_queue.fetch_job(job_id))

        return Response(resp)
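Examples 4 and 5 differ only in transport: both validators return a task dict whose keys can be read off the enqueue calls ("channel_id" plus "drive_id"/"datafolder" for disk imports, "baseurl"/"peer_id" for network imports). A hedged sketch of the corresponding request payloads, with placeholder values:

# Hypothetical payloads inferred from the task dicts above; fields beyond these may
# be required by validate_local_import_task / validate_remote_import_task.
disk_payload = {
    "channel_id": "0" * 32,       # placeholder channel id
    "drive_id": "local-drive-1",  # a drive previously discovered by Kolibri
}
network_payload = {
    "channel_id": "0" * 32,
    "baseurl": "https://studio.learningequality.org",  # or a peer's URL
}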
Example 6
    def startexportlogcsv(self, request):
        """
        Dump the requested logs in CSV format.
        By default the content summary log is dumped.

        :param logtype: kind of log to dump, "summary" or "session"
        :returns: An object with the job information
        """
        csv_export_filenames = {
            "session": "content_session_logs.csv",
            "summary": "content_summary_logs.csv",
        }
        log_type = request.data.get("logtype", "summary")
        if log_type in csv_export_filenames:
            logs_dir = os.path.join(conf.KOLIBRI_HOME, "log_export")
            filepath = os.path.join(logs_dir, csv_export_filenames[log_type])
        else:
            raise Http404(
                "Impossible to create a csv export file for {}".format(log_type)
            )
        if not os.path.isdir(logs_dir):
            os.mkdir(logs_dir)

        job_type = (
            "EXPORTSUMMARYLOGCSV" if log_type == "summary" else "EXPORTSESSIONLOGCSV"
        )

        job_metadata = {"type": job_type, "started_by": request.user.pk}

        job_id = priority_queue.enqueue(
            call_command,
            "exportlogs",
            log_type=log_type,
            output_file=filepath,
            overwrite="true",
            extra_metadata=job_metadata,
            track_progress=True,
        )

        resp = _job_to_response(priority_queue.fetch_job(job_id))

        return Response(resp)
Example 7
    def exportuserstocsv(self, request):
        """
        Export users, classes, roles and role assignments to a CSV file.

        :param facility_id: id of the facility to export; defaults to the requesting user's facility
        :returns: An object with the job information

        """
        facility_id = request.data.get("facility_id", None)

        try:
            if facility_id:
                facility = Facility.objects.get(pk=facility_id).id
            else:
                # keep both branches consistent: the job metadata and the
                # management command expect a facility id, not a model instance
                facility = request.user.facility.id
        except Facility.DoesNotExist:
            raise serializers.ValidationError(
                "Facility with ID {} does not exist".format(facility_id)
            )

        job_type = "EXPORTUSERSTOCSV"
        job_metadata = {
            "type": job_type,
            "started_by": request.user.pk,
            "facility": facility,
        }
        locale = get_language_from_request(request)

        job_id = priority_queue.enqueue(
            call_command,
            "bulkexportusers",
            facility=facility,
            locale=locale,
            overwrite="true",
            extra_metadata=job_metadata,
            track_progress=True,
        )

        resp = _job_to_response(priority_queue.fetch_job(job_id))

        return Response(resp)
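Compared with Example 2, this variant accepts an optional facility_id so that an admin can export a facility other than their own. A minimal client sketch, with the route again an assumption:

import requests

session = requests.Session()
# ... authenticate as a facility or device admin (assumed) ...

# Omit facility_id to export the requesting user's own facility.
resp = session.post(
    "http://localhost:8080/api/tasks/tasks/exportuserstocsv/",  # hypothetical route
    json={"facility_id": "0" * 32},  # placeholder facility id
)
print(resp.json())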
Example 8
    def importusersfromcsv(self, request):
        """
        Import users, classes, roles and role assignments from a CSV file.

        :param FILE: file dictionary with the file object
        :param csvfile: filename of the file stored in the Kolibri temp folder
        :param dryrun: validate the data but don't modify the database
        :param delete: users not in the CSV will be deleted from the facility, and classes cleared
        :returns: An object with the job information
        """

        def manage_fileobject(request, temp_dir):
            upload = UploadedFile(request.FILES["csvfile"])
            # Django uses InMemoryUploadedFile for files less than 2.5Mb
            # and TemporaryUploadedFile for bigger files:
            if isinstance(upload.file, InMemoryUploadedFile):
                # mkstemp returns an open file descriptor; wrap it with os.fdopen
                # so it is closed properly instead of leaking
                fd, filepath = mkstemp(dir=temp_dir, suffix=".upload")
                with os.fdopen(fd, "w+b") as dest:
                    for chunk in upload.file.chunks():
                        dest.write(chunk)
            else:
                tmpfile = upload.file.temporary_file_path()
                filename = ntpath.basename(tmpfile)
                filepath = os.path.join(temp_dir, filename)
                shutil.copy(tmpfile, filepath)
            return filepath

        temp_dir = os.path.join(conf.KOLIBRI_HOME, "temp")
        if not os.path.isdir(temp_dir):
            os.mkdir(temp_dir)

        locale = get_language_from_request(request)
        # the request must contain either an object file
        # or the filename of the csv stored in Kolibri temp folder
        # Validation will provide the file object, while
        # Importing will provide the filename, previously validated
        if not request.FILES:
            filename = request.data.get("csvfile", None)
            if filename:
                filepath = os.path.join(temp_dir, filename)
            else:
                return HttpResponseBadRequest("The request must contain a file object")
        else:
            if "csvfile" not in request.FILES:
                return HttpResponseBadRequest("Wrong file object")
            filepath = manage_fileobject(request, temp_dir)

        delete = request.data.get("delete", None)
        dryrun = request.data.get("dryrun", None)
        userid = request.user.pk
        facility_id = request.data.get("facility_id", None)
        job_type = "IMPORTUSERSFROMCSV"
        job_metadata = {"type": job_type, "started_by": userid, "facility": facility_id}
        job_args = ["bulkimportusers"]
        if dryrun:
            job_args.append("--dryrun")
        if delete:
            job_args.append("--delete")
        job_args.append(filepath)

        job_kwd_args = {
            "facility": facility_id,
            "userid": userid,
            "locale": locale,
            "extra_metadata": job_metadata,
            "track_progress": True,
        }

        job_id = priority_queue.enqueue(call_command, *job_args, **job_kwd_args)

        resp = _job_to_response(priority_queue.fetch_job(job_id))

        return Response(resp)
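A hedged sketch of the two-step flow the comments above describe: upload the file with dryrun set to validate it, then re-submit by filename to perform the import. The route is an assumption; the field names follow the code above.

import requests

URL = "http://localhost:8080/api/tasks/tasks/importusersfromcsv/"  # hypothetical route
session = requests.Session()
# ... authenticate as an admin (assumed) ...

# Step 1: validation pass. The file is copied into KOLIBRI_HOME/temp and the job
# reports problems without modifying the database.
with open("users.csv", "rb") as f:
    resp = session.post(URL, data={"dryrun": 1}, files={"csvfile": f})

# Step 2: real import. Re-submit the name of the previously uploaded file
# (no FILES in this request), assuming the validation job reported it back.
resp = session.post(URL, data={"csvfile": "users.csv"})
print(resp.json())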