Example #1
    def import_boot_images(self, request, system_id):
        """@description-title Import boot images
        @description Import boot images on a given rack controller or all
        rack controllers.

        @param (string) "{system_id}" [required=true] A rack controller
        system_id.

        @success (http-status-code) "200" 200
        @success (content) "success-single" Import of boot images started on
        <rack controller name>

        @error (http-status-code) "404" 404
        @error (content) "not-found" The requested rack controller system_id
        is not found.
        @error-example "not-found"
            Not Found
        """
        # Avoid circular import.
        from maasserver.clusterrpc.boot_images import RackControllersImporter

        rack = self.model.objects.get_node_or_404(system_id=system_id,
                                                  user=request.user,
                                                  perm=NodePermission.admin)
        post_commit_do(RackControllersImporter.schedule, rack.system_id)
        return HttpResponse(
            "Import of boot images started on %s" % rack.hostname,
            content_type=("text/plain; charset=%s" % settings.DEFAULT_CHARSET))
Example #2
    def _execute(self):
        """See `NodeAction.execute`."""
        try:
            post_commit_do(RackControllersImporter.schedule,
                           self.node.system_id)
        except RPC_EXCEPTIONS as exception:
            raise NodeActionError(exception)
Example #3
def update_boot_source_cache(sender, instance, *args, **kwargs):
    """Update the `BootSourceCache` using the updated source.

    This only begins after a successful commit to the database, and is then
    run in a thread. Nothing waits for its completion.
    """
    post_commit_do(reactor.callLater, 0, cache_boot_sources)
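
The receiver above has Django's standard signal signature, so elsewhere in the app it is presumably connected to saves of the boot-source model. A hedged sketch of that wiring (the sender model and the connect site are assumptions, not shown in the example):

from django.db.models.signals import post_save

from maasserver.models import BootSource

post_save.connect(update_boot_source_cache, sender=BootSource)
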
Example #4
def delete_large_object(sender, instance, **kwargs):
    """Delete the large object when the `LargeFile` is deleted.

    This is done using the `post_delete` signal instead of overriding delete
    on `LargeFile`, so it works correctly for both the model and `QuerySet`.
    """
    if instance.content is not None:
        post_commit_do(delete_large_object_content_later, instance.content)
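
The docstring's point about `QuerySet` deserves spelling out: bulk deletion never calls `Model.delete()`, but it does send `post_delete` for every row, so a signal receiver covers both paths. A sketch, with the connect call assumed rather than taken from the source:

from django.db.models.signals import post_delete

from maasserver.models import LargeFile

post_delete.connect(delete_large_object, sender=LargeFile)

# Both paths fire delete_large_object for each deleted row:
#   LargeFile.objects.get(id=1).delete()       # model-level delete
#   LargeFile.objects.filter(size=0).delete()  # QuerySet delete, which
#                                              # bypasses Model.delete()
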
Example #5
    def import_boot_images(self, request):
        """Import the boot images on all rack controllers."""
        # Avoid circular import.
        from maasserver.clusterrpc.boot_images import RackControllersImporter

        post_commit_do(RackControllersImporter.schedule)
        return HttpResponse(
            "Import of boot images started on all rack controllers",
            content_type=("text/plain; charset=%s" % settings.DEFAULT_CHARSET))
Example #6
def inner_start_up(master=False):
    """Startup jobs that must run serialized w.r.t. other starting servers."""
    # Register our MAC data type with psycopg.
    register_mac_type(connection.cursor())

    # All commissioning and testing scripts are stored in the database. For
    # a commissioning ScriptSet to be created, Scripts must exist first. Call
    # this early, and only on the master process, to ensure they exist and
    # are created only once. If get_or_create_running_controller() is called
    # before this, it will fail on the first run.
    if master:
        load_builtin_scripts()

    # Ensure that this region is represented in the database. The first regiond
    # to pass through inner_start_up on this host can do this; it should NOT
    # be restricted to masters only. This also ensures that the MAAS ID is set
    # on the filesystem; it will be done in a post-commit hook and will thus
    # happen before `locks.startup` is released.
    region = RegionController.objects.get_or_create_running_controller()
    # Ensure the UUID is created once the running controller exists.
    RegionController.objects.get_or_create_uuid()

    # Only perform the following if this is the master process for the
    # region controller.
    if master:
        # Freshen the kms SRV records.
        dns_kms_setting_changed()

        # Make sure the commissioning distro series is still a supported LTS.
        commissioning_distro_series = Config.objects.get_config(
            name="commissioning_distro_series")
        ubuntu = UbuntuOS()
        if commissioning_distro_series not in (
                ubuntu.get_supported_commissioning_releases()):
            Config.objects.set_config(
                "commissioning_distro_series",
                ubuntu.get_default_commissioning_release(),
            )
            Notification.objects.create_info_for_admins(
                "Ubuntu %s is no longer a supported commissioning "
                "series. Ubuntu %s has been automatically selected." % (
                    commissioning_distro_series,
                    ubuntu.get_default_commissioning_release(),
                ),
                ident="commissioning_release_deprecated",
            )

        # Update deprecation notifications if needed
        sync_deprecation_notifications()

        # Refresh the region soon after this transaction commits.
        post_commit_do(reactor.callLater, 0, refreshRegion, region)

        # Create a certificate for the region.
        post_commit_do(reactor.callLater, 0, generate_certificate_if_needed)
Example #7
    def _populate_nodes_later(self):
        """Find all nodes that match this tag, and update them, later.

        This schedules population to happen post-commit, without waiting for
        its outcome.
        """
        # Avoid circular imports.
        from maasserver.populate_tags import populate_tags

        if self.is_defined:
            # Schedule repopulate to happen after commit. This thread does not
            # wait for it to complete.
            post_commit_do(reactor.callLater, 0, deferToDatabase,
                           populate_tags, self)
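
The argument chain here makes three distinct hops, which is easy to misread. A toy version with an illustrative task in place of `populate_tags` (import paths follow the MAAS tree; `recount_tag` is hypothetical):

from twisted.internet import reactor

from maasserver.utils.orm import post_commit_do
from maasserver.utils.threads import deferToDatabase


def recount_tag(tag):
    # Runs in a database thread, so blocking ORM access is safe here.
    return tag.node_set.count()


def schedule_recount(tag):
    # Hop 1: post_commit_do waits for the transaction to COMMIT.
    # Hop 2: reactor.callLater(0, ...) runs on the reactor's next turn.
    # Hop 3: deferToDatabase moves the ORM work back off the reactor.
    post_commit_do(reactor.callLater, 0, deferToDatabase, recount_tag, tag)
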
Example #8
    def import_boot_images(self, request, system_id):
        """Import the boot images on this rack controller.

        Returns 404 if the rack controller is not found.
        """
        # Avoid circular import.
        from maasserver.clusterrpc.boot_images import RackControllersImporter

        rack = self.model.objects.get_node_or_404(
            system_id=system_id, user=request.user, perm=NODE_PERMISSION.EDIT)
        post_commit_do(RackControllersImporter.schedule, rack.system_id)
        return HttpResponse(
            "Import of boot images started on %s" % rack.hostname,
            content_type=("text/plain; charset=%s" % settings.DEFAULT_CHARSET))
Example #9
    def update(self, request, id, file_id):
        """Upload piece of boot resource file."""
        resource = get_object_or_404(BootResource, id=id)
        rfile = get_object_or_404(BootResourceFile, id=file_id)
        size = int(request.META.get("CONTENT_LENGTH", "0"))
        data = request.body
        if size == 0:
            raise MAASAPIBadRequest("Missing data.")
        if size != len(data):
            raise MAASAPIBadRequest(
                "Content-Length doesn't equal size of recieved data."
            )
        if resource.rtype not in ALLOW_UPLOAD_RTYPES:
            raise MAASAPIForbidden(
                "Cannot upload to a resource of type: %s. " % resource.rtype
            )
        if rfile.largefile.complete:
            raise MAASAPIBadRequest("Cannot upload to a complete file.")

        with rfile.largefile.content.open("wb") as stream:
            stream.seek(0, os.SEEK_END)

            # Check that the uploading data will not make the file larger
            # than expected.
            current_size = stream.tell()
            if current_size + size > rfile.largefile.total_size:
                raise MAASAPIBadRequest("Too much data recieved.")

            stream.write(data)
            rfile.largefile.size = current_size + size
            rfile.largefile.save()

        if rfile.largefile.complete:
            if not rfile.largefile.valid:
                raise MAASAPIBadRequest(
                    "Saved content does not match given SHA256 value."
                )
            # Avoid circular import.
            from maasserver.clusterrpc.boot_images import (
                RackControllersImporter,
            )

            post_commit_do(RackControllersImporter.schedule)
        return rc.ALL_OK
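
Seen from the client, the handler above expects raw chunks whose Content-Length matches the body exactly, repeated until the large file is complete. A hypothetical uploader sketch (the URL layout and authentication are assumptions):

import requests


def upload_in_pieces(session, upload_url, path, chunk_size=4 * 1024 * 1024):
    # session is an authenticated requests.Session; upload_url points at
    # the boot-resource file endpoint served by update() above.
    with open(path, "rb") as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            # Each PUT carries one exact-size body, so the server's check
            # of CONTENT_LENGTH against len(data) passes.
            response = session.put(upload_url, data=chunk)
            response.raise_for_status()
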
Example #10
def inner_start_up():
    """Startup jobs that must run serialized w.r.t. other starting servers."""
    # Register our MAC data type with psycopg.
    register_mac_type(connection.cursor())

    # Ensure that this region is represented in the database. The first regiond
    # to pass through inner_start_up on this host can do this; it should NOT
    # be restricted to masters only. This also ensures that the MAAS ID is set
    # on the filesystem; it will be done in a post-commit hook and will thus
    # happen before `locks.startup` is released.
    region = RegionController.objects.get_or_create_running_controller()

    # Only perform the following if this is the master process for the
    # region controller.
    if is_master_process():
        # Freshen the kms SRV records.
        dns_kms_setting_changed()
        # Add or update all builtin scripts
        load_builtin_scripts()
        # Refresh the region soon after this transaction commits.
        post_commit_do(reactor.callLater, 0, refreshRegion, region)
Example #11
    def import_boot_images(self, request):
        """@description-title Import boot images on all rack controllers
        @description Imports boot images on all rack controllers.

        @success (http-status-code) "200" 200
        @success (content) "success-all" Import of boot images started on
        all rack controllers

        @error (http-status-code) "404" 404
        @error (content) "not-found" The requested rack controller system_id
        is not found.
        @error-example "not-found"
            Not Found
        """
        # Avoid circular import.
        from maasserver.clusterrpc.boot_images import RackControllersImporter

        post_commit_do(RackControllersImporter.schedule)
        return HttpResponse(
            "Import of boot images started on all rack controllers",
            content_type=("text/plain; charset=%s" % settings.DEFAULT_CHARSET))
Example #12
    def create(self, request):
        """@description-title Upload a new boot resource
        @description Uploads a new boot resource.

        @param (string) "name" [required=true] Name of the boot resource.

        @param (string) "architecture" [required=true] Architecture the boot
        resource supports.

        @param (string) "sha256" [required=true] The ``sha256`` hash of the
        resource.

        @param (string) "size" [required=true] The size of the resource in
        bytes.

        @param (string) "title" [required=false] Title for the boot resource.

        @param (string) "filetype" [required=false] Filetype for uploaded
        content. (Default: ``tgz``. Supported: ``tgz``, ``tbz``, ``txz``,
        ``ddtgz``, ``ddtbz``, ``ddtxz``, ``ddtar``, ``ddbz2``, ``ddgz``,
        ``ddxz``, ``ddraw``)

        @param (string) "content" [required=false] Image content. Note: this is
        not a normal parameter, but an ``application/octet-stream`` file
        upload.

        @success (http-status-code) "server-success" 201
        @success (json) "success-json" A JSON object containing information
        about the uploaded resource.
        @success-example "success-json" [exkey=boot-res-create] placeholder
        text
        """
        # If the user provides no parameters to the create command, then
        # django will treat the form as valid, and so it won't actually
        # validate any of the data.
        data = request.data
        if data is None:
            data = {}
        else:
            data = data.copy()
        if "filetype" not in data:
            data["filetype"] = "tgz"
        file_content = get_content_parameter(request)
        if file_content is not None:
            content = SimpleUploadedFile(
                content=file_content,
                name="file",
                content_type="application/octet-stream",
            )
            form = BootResourceForm(data=data, files={"content": content})
        else:
            form = BootResourceNoContentForm(data=data)
        if not form.is_valid():
            raise MAASAPIValidationError(form.errors)
        resource = form.save()

        # If an upload contained the full file, then we can have the clusters
        # sync a new resource.
        if file_content is not None:
            # Avoid circular import.
            from maasserver.clusterrpc.boot_images import (
                RackControllersImporter,
            )

            post_commit_do(RackControllersImporter.schedule)

        stream = json_object(boot_resource_to_dict(resource, with_sets=True),
                             request)
        return HttpResponse(
            stream,
            content_type="application/json; charset=utf-8",
            status=int(http.client.CREATED),
        )
Example #13
    def compose(
        self,
        timeout=120,
        creation_type=NODE_CREATION_TYPE.MANUAL,
        skip_commissioning=None,
    ):
        """Compose the machine.

        Internal operation of this form is asynchronous. It will block the
        calling thread until the asynchronous operation is complete. Adjust
        `timeout` to minimize the maximum wait for the asynchronous operation.
        """

        if skip_commissioning is None:
            skip_commissioning = self.get_value_for("skip_commissioning")

        def db_work(client):
            # Check overcommit ratios.
            over_commit_message = self.pod.check_over_commit_ratios(
                requested_cores=self.get_value_for("cores"),
                requested_memory=self.get_value_for("memory"),
            )
            if over_commit_message:
                raise PodProblem("Unable to compose KVM instance in '%s'. %s" %
                                 (self.pod.name, over_commit_message))

            # Update the default storage pool.
            if self.pod.default_storage_pool is not None:
                power_parameters["default_storage_pool_id"] = (
                    self.pod.default_storage_pool.pool_id
                )

            # Find the pod's known host interfaces.
            if self.pod.host is not None:
                interfaces = get_known_host_interfaces(self.pod.host)
            else:
                interfaces = []

            return client, interfaces

        def create_and_sync(result):
            requested_machine, result = result
            discovered_machine, pod_hints = result
            created_machine = self.pod.create_machine(
                discovered_machine,
                self.request.user,
                skip_commissioning=skip_commissioning,
                creation_type=creation_type,
                interfaces=self.get_value_for("interfaces"),
                requested_machine=requested_machine,
                domain=self.get_value_for("domain"),
                pool=self.get_value_for("pool"),
                zone=self.get_value_for("zone"),
            )
            self.pod.sync_hints(pod_hints)
            return created_machine

        @inlineCallbacks
        def async_compose_machine(result, power_type, power_parameters,
                                  **kwargs):
            client, result = result
            requested_machine = yield deferToDatabase(
                self.get_requested_machine, result)
            result = yield compose_machine(
                client,
                power_type,
                power_parameters,
                requested_machine,
                **kwargs,
            )
            return requested_machine, result

        power_parameters = self.pod.power_parameters.copy()

        if isInIOThread():
            # Running under the twisted reactor: chain the work through
            # Deferreds, pushing all database access to threads.
            d = deferToDatabase(transactional(self.pod.get_client_identifiers))
            d.addCallback(getClientFromIdentifiers)
            d.addCallback(partial(deferToDatabase, transactional(db_work)))
            d.addCallback(
                async_compose_machine,
                self.pod.power_type,
                power_parameters,
                pod_id=self.pod.id,
                name=self.pod.name,
            )
            d.addCallback(
                partial(deferToDatabase, transactional(create_and_sync)))
            def request_results(created_machine):
                # Ask for commissioning results only once the machine has
                # been created and synced.
                request_commissioning_results(self.pod)
                return created_machine

            d.addCallback(request_results)
            return d
        else:
            # Running outside of the reactor. Do the work inside the reactor
            # and then finish the work outside.
            @asynchronous
            def wrap_compose_machine(client_idents, pod_type, parameters,
                                     request, pod_id, name):
                """Wrapper to get the client."""
                d = getClientFromIdentifiers(client_idents)
                d.addCallback(
                    compose_machine,
                    pod_type,
                    parameters,
                    request,
                    pod_id=pod_id,
                    name=name,
                )
                return d

            _, result = db_work(None)
            try:
                requested_machine = self.get_requested_machine(result)
                result = wrap_compose_machine(
                    self.pod.get_client_identifiers(),
                    self.pod.power_type,
                    power_parameters,
                    requested_machine,
                    pod_id=self.pod.id,
                    name=self.pod.name,
                ).wait(timeout)
            except crochet.TimeoutError:
                raise PodProblem(
                    "Unable to compose a machine because '%s' driver "
                    "timed out after %d seconds." %
                    (self.pod.power_type, timeout))
            created_machine = create_and_sync((requested_machine, result))
            post_commit_do(reactor.callLater, 0, request_commissioning_results,
                           self.pod)
            return created_machine
Example #14
    def discover_and_sync_pod(self):
        """Discover and sync the pod information."""
        def update_db(result):
            discovered_pod, discovered = result

            if self.request is not None:
                user = self.request.user
            else:
                user = self.user
            # If this is a new instance it will be stored in the database
            # at the end of sync.
            self.instance.sync(discovered_pod, user)

            # Save which rack controllers can route and which cannot.
            discovered_rack_ids = [
                rack_id for rack_id, _ in discovered[0].items()
            ]
            for rack_controller in RackController.objects.all():
                routable = rack_controller.system_id in discovered_rack_ids
                bmc_route_model = BMCRoutableRackControllerRelationship
                relation, created = bmc_route_model.objects.get_or_create(
                    bmc=self.instance.as_bmc(),
                    rack_controller=rack_controller,
                    defaults={"routable": routable},
                )
                if not created and relation.routable != routable:
                    relation.routable = routable
                    relation.save()
            return self.instance

        if isInIOThread():
            # Running in twisted reactor, do the work inside the reactor.
            d = discover_pod(
                self.instance.power_type,
                self.instance.power_parameters,
                pod_id=self.instance.id,
                name=self.instance.name,
            )
            d.addCallback(lambda discovered: (
                get_best_discovered_result(discovered),
                discovered,
            ))

            def catch_no_racks(result):
                discovered_pod, discovered = result
                if discovered_pod is None:
                    raise PodProblem(
                        "Unable to start the pod discovery process. "
                        "No rack controllers connected.")
                return discovered_pod, discovered

            def wrap_errors(failure):
                if failure.check(PodProblem):
                    return failure
                else:
                    log.err(failure, "Failed to discover pod.")
                    raise PodProblem(str(failure.value))

            d.addCallback(catch_no_racks)
            d.addCallback(partial(deferToDatabase, transactional(update_db)))
            d.addCallback(request_commissioning_results)
            d.addErrback(wrap_errors)
            return d
        else:
            # Perform the actions inside the executing thread.
            try:
                discovered = discover_pod(
                    self.instance.power_type,
                    self.instance.power_parameters,
                    pod_id=self.instance.id,
                    name=self.instance.name,
                )
            except Exception as exc:
                raise PodProblem(str(exc)) from exc

            # Use the first discovered pod object. All other objects are
            # ignored. The other rack controllers that also provided a result
            # can route to the pod.
            try:
                discovered_pod = get_best_discovered_result(discovered)
            except Exception as error:
                raise PodProblem(str(error))
            if discovered_pod is None:
                raise PodProblem("Unable to start the pod discovery process. "
                                 "No rack controllers connected.")
            update_db((discovered_pod, discovered))
            # The data isn't committed to the database until the transaction is
            # complete. The commissioning results must be sent after the
            # transaction completes so the metadata server can process the
            # data.
            post_commit_do(
                reactor.callLater,
                0,
                request_commissioning_results,
                self.instance,
            )
            return self.instance
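
Examples #13 and #14 share one shape: branch on `isInIOThread()` so the same method works both from the reactor and from an ordinary request thread, with `post_commit_do` only needed on the synchronous side. A stripped-down sketch of the pattern (`do_work` and `finish` are placeholders, not MAAS APIs):

from twisted.internet.threads import deferToThread
from twisted.python.threadable import isInIOThread


def do_work():
    return 42


def finish(result):
    return result * 2


def run_task():
    if isInIOThread():
        # On the reactor thread: never block. Return a Deferred and push
        # the blocking call onto the thread pool.
        d = deferToThread(do_work)
        d.addCallback(finish)
        return d
    # Off the reactor: blocking is fine, so run everything synchronously.
    return finish(do_work())
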