Esempio n. 1
0
class ControllerInfo(CleanSave, TimestampedModel):
    """Metadata associated with a `Node` that acts as a controller.

    :ivar node: `Node` this `ControllerInfo` holds metadata for.
    :ivar version: Last known version of the controller.
    :ivar interfaces: Interfaces JSON most recently sent by the controller.
    :ivar interface_update_hints: Topology hints most recently sent by the
        controller during a call to update_interfaces().
    """

    class Meta(DefaultMeta):
        verbose_name = "ControllerInfo"

    objects = ControllerInfoManager()

    # The controller node itself doubles as this row's primary key.
    node = OneToOneField(
        Node, primary_key=True, on_delete=CASCADE, null=False, blank=False
    )

    version = CharField(max_length=255, blank=True, null=True)

    # JSON blobs are capped at 32KiB when encoded.
    interfaces = JSONObjectField(default="", blank=True, max_length=(2**15))

    interface_update_hints = JSONObjectField(
        default="", blank=True, max_length=(2**15)
    )

    def __str__(self):
        label = self.__class__.__name__
        return "%s (%s)" % (label, self.node.hostname)
Esempio n. 2
0
class BootSourceCache(CleanSave, TimestampedModel):
    """A cache of an image provided in boot source."""

    class Meta(DefaultMeta):
        """Needed for South to recognize this model."""

    objects = BootSourceCacheManager()

    boot_source = ForeignKey(BootSource, on_delete=CASCADE, blank=False)

    # Identity of the cached image: OS, architecture and release.
    os = CharField(max_length=32, null=False, blank=False)

    bootloader_type = CharField(max_length=32, null=True, blank=True)

    arch = CharField(max_length=32, null=False, blank=False)

    subarch = CharField(max_length=32, null=False, blank=False)

    kflavor = CharField(max_length=32, null=True, blank=True)

    release = CharField(max_length=32, null=False, blank=False)

    label = CharField(max_length=32, null=False, blank=False)

    # Human-friendly release metadata, when provided by the source.
    release_codename = CharField(max_length=255, null=True, blank=True)

    release_title = CharField(max_length=255, null=True, blank=True)

    # End-of-life date for the release, when known.
    support_eol = DateField(blank=True, null=True)

    extra = JSONObjectField(default="", blank=True, editable=False)

    def __str__(self):
        identity = (
            self.os, self.release, self.arch, self.subarch, self.kflavor)
        return ("<BootSourceCache os=%s, release=%s, arch=%s, subarch=%s, "
                "kflavor=%s>" % identity)
Esempio n. 3
0
class RDNS(CleanSave, TimestampedModel):
    """Data gathered from reverse DNS for a particular IP address.

    :ivar ip: Observed IP address.
    :ivar hostname: Most recent reverse DNS entry.
    """

    class Meta(DefaultMeta):
        verbose_name = "Reverse-DNS entry"
        verbose_name_plural = "Reverse-DNS entries"
        unique_together = ("ip", "observer")

    objects = RDNSManager()

    # The IP address this reverse-DNS entry was observed for.
    ip = MAASIPAddressField(
        verbose_name="IP",
        blank=False,
        null=False,
        editable=False,
        unique=False,
    )

    # "Primary" reverse-DNS hostname. Reverse lookups can return several
    # entries, so this is an educated guess at which one is "primary"; the
    # discovery view coalesces it with other data to present the default
    # hostname for the IP.
    hostname = CharField(
        max_length=256,
        blank=False,
        null=True,
        unique=False,
        editable=True,
    )

    # Every hostname the lookup returned, kept for support/debugging in case
    # the "primary" guess is wrong -- and in case we ever want to show all.
    hostnames = JSONObjectField()

    # The region controller that observed the hostname.
    observer = ForeignKey(
        "Node",
        blank=False,
        null=False,
        unique=False,
        editable=False,
        on_delete=CASCADE,
    )
Esempio n. 4
0
class Config(Model):
    """A single named configuration setting.

    :ivar name: The name of the configuration option.
    :type name: unicode
    :ivar value: The configuration value.
    :type value: Any pickleable python object.
    """

    class Meta(DefaultMeta):
        """Needed for South to recognize this model."""

    # Option names are unique; the value is stored as JSON.
    name = CharField(unique=True, max_length=255)
    value = JSONObjectField(null=True)

    objects = ConfigManager()

    def __str__(self):
        pair = (self.name, self.value)
        return "%s: %s" % pair
Esempio n. 5
0
class BootResourceFile(CleanSave, TimestampedModel):
    """A single file belonging to a `BootResourceSet`.

    Each `BootResourceSet` contains a set of files: exactly one for user
    uploaded boot resources, possibly several for synced and generated
    resources.

    :ivar resource_set: `BootResourceSet` this file belongs to. When the
        `BootResourceSet` is deleted, this `BootResourceFile` is deleted too.
    :ivar largefile: Actual file information and data. See
        :class:`LargeFile`.
    :ivar filename: Name of the file.
    :ivar filetype: Type of the file. See the vocabulary
        :class:`BOOT_RESOURCE_FILE_TYPE`.
    :ivar extra: Extra information about the file. This is only used
        for synced Ubuntu images.
    """

    class Meta(DefaultMeta):
        unique_together = (("resource_set", "filename"), )

    resource_set = ForeignKey(
        BootResourceSet,
        on_delete=CASCADE,
        related_name="files",
        editable=False,
    )

    largefile = ForeignKey(LargeFile, on_delete=CASCADE, editable=False)

    filename = CharField(max_length=255, editable=False)

    filetype = CharField(
        max_length=20,
        editable=False,
        choices=BOOT_RESOURCE_FILE_TYPE_CHOICES,
        default=BOOT_RESOURCE_FILE_TYPE.ROOT_TGZ,
    )

    extra = JSONObjectField(blank=True, default="", editable=False)

    def __str__(self):
        ident = (self.filename, self.filetype)
        return "<BootResourceFile %s/%s>" % ident
Esempio n. 6
0
class Switch(CleanSave, TimestampedModel):
    """A `Switch` represents a networking switch `Node` in the network.

    :ivar node: `Node` this `Switch` represents switch metadata for.
    :ivar nos_driver: The NOS driver defines which Network Operating System
        this switch uses.
    :ivar nos_parameters: Some JSON containing arbitrary parameters this
        Switch's NOS requires to function.
    :ivar objects: the switch manager class.
    """
    class Meta(DefaultMeta):
        verbose_name = "Switch"
        verbose_name_plural = "Switches"

    objects = Manager()

    # The switch node itself doubles as this row's primary key.
    node = OneToOneField(Node,
                         null=False,
                         blank=False,
                         on_delete=CASCADE,
                         primary_key=True)

    # The possible choices for this field depend on the NOS drivers advertised
    # by the rack controllers.  This needs to be populated on the fly, in
    # forms.py, each time the form to edit a node is instantiated.
    nos_driver = CharField(max_length=64, null=False, blank=True, default="")

    # JSON-encoded set of parameters for the NOS driver, limited to 32kiB when
    # encoded as JSON.
    nos_parameters = JSONObjectField(max_length=(2**15),
                                     blank=True,
                                     default="")

    def __str__(self):
        return "%s (%s)" % (self.__class__.__name__, self.node.hostname)

    def delete(self):
        """Delete this switch, logging the deletion first."""
        maaslog.info("%s: Deleting switch", self)
        # Zero-argument super() for consistency with the modern form used
        # elsewhere in this codebase (e.g. unique_error_message(), save()).
        super().delete()
Esempio n. 7
0
class BootResource(CleanSave, TimestampedModel):
    """Boot resource.

    Each `BootResource` represents a os/series combination or custom uploaded
    image that maps to a specific architecture that a node can use to
    commission or install.

    `BootResource` can have multiple `BootResourceSet` corresponding to
    different versions of this `BootResource`. When a node selects this
    `BootResource` the newest `BootResourceSet` is used to deploy to the node.

    :ivar rtype: Type of `BootResource`. See the vocabulary
        :class:`BOOT_RESOURCE_TYPE`.
    :ivar name: Name of the `BootResource`. If its BOOT_RESOURCE_TYPE.UPLOADED
        then `name` is used to reference this image. If its
        BOOT_RESOURCE_TYPE.SYNCED or BOOT_RESOURCE_TYPE.GENERATED then its
        in the format of os/series.
    :ivar architecture: Architecture of the `BootResource`. It must be in
        the format arch/subarch.
    :ivar extra: Extra information about the file. This is only used
        for synced Ubuntu images.
    """

    class Meta(DefaultMeta):
        unique_together = (("name", "architecture"),)

    objects = BootResourceManager()

    # See BOOT_RESOURCE_TYPE for the possible values.
    rtype = IntegerField(choices=BOOT_RESOURCE_TYPE_CHOICES, editable=False)

    name = CharField(max_length=255, blank=False)

    # Must be in the "arch/subarch" form; enforced by the validator.
    architecture = CharField(
        max_length=255, blank=False, validators=[validate_architecture]
    )

    bootloader_type = CharField(max_length=32, blank=True, null=True)

    kflavor = CharField(max_length=32, blank=True, null=True)

    # The hwe-rolling kernel is a meta-package which depends on the latest
    # kernel available. Instead of placing a duplicate kernel in the stream
    # SimpleStreams adds a boolean field to indicate that the hwe-rolling
    # kernel meta-package points to this kernel. When the rolling field is set
    # true MAAS allows users to deploy the hwe-rolling kernel by using this
    # BootResource kernel and instructs Curtin to install the meta-package.
    rolling = BooleanField(blank=False, null=False, default=False)

    extra = JSONObjectField(blank=True, default="", editable=False)

    def __str__(self):
        return "<BootResource name=%s, arch=%s, kflavor=%s>" % (
            self.name,
            self.architecture,
            self.kflavor,
        )

    @property
    def display_rtype(self):
        """Return rtype text as displayed to the user."""
        return BOOT_RESOURCE_TYPE_CHOICES_DICT[self.rtype]

    def clean(self):
        """Validate the model.

        Checks that the name is in a valid format, for its type.

        :raises ValidationError: when the name does not match the naming
            rules for the resource's `rtype`.
        """
        if self.rtype == BOOT_RESOURCE_TYPE.UPLOADED:
            # An uploaded image may only contain a '/' when the part before
            # it names a registered operating system.
            if "/" in self.name:
                os_name = self.name.split("/")[0]
                osystem = OperatingSystemRegistry.get_item(os_name)
                if osystem is None:
                    raise ValidationError(
                        "%s boot resource cannot contain a '/' in it's name "
                        "unless it starts with a supported operating system."
                        % (self.display_rtype)
                    )
        elif self.rtype in RTYPE_REQUIRING_OS_SERIES_NAME:
            if "/" not in self.name:
                raise ValidationError(
                    "%s boot resource must contain a '/' in it's name."
                    % (self.display_rtype)
                )

    def unique_error_message(self, model_class, unique_check):
        """Return a friendlier message for the (name, architecture) clash."""
        if unique_check == ("name", "architecture"):
            return "Boot resource of name, and architecture already exists."
        return super().unique_error_message(model_class, unique_check)

    def get_latest_set(self):
        """Return latest `BootResourceSet`."""
        # Use a database query unless "sets" was prefetched; in that case
        # sort the cached objects to avoid issuing a fresh query.
        if (
            not hasattr(self, "_prefetched_objects_cache")
            or "sets" not in self._prefetched_objects_cache
        ):
            return self.sets.order_by("id").last()
        elif self.sets.all():
            return sorted(self.sets.all(), key=attrgetter("id"), reverse=True)[
                0
            ]
        else:
            return None

    def get_latest_complete_set(self):
        """Return latest `BootResourceSet` where all `BootResouceFile`'s
        are complete."""
        if (
            not hasattr(self, "_prefetched_objects_cache")
            or "sets" not in self._prefetched_objects_cache
        ):
            resource_sets = self.sets.order_by("-id").annotate(
                files_count=Count("files__id"),
                files_size=Sum("files__largefile__size"),
                files_total_size=Sum("files__largefile__total_size"),
            )
        else:
            # NOTE(review): this branch assumes the prefetched sets already
            # carry files_count/files_size/files_total_size annotations --
            # confirm against whatever populates the prefetch cache.
            resource_sets = sorted(
                self.sets.all(), key=attrgetter("id"), reverse=True
            )
        for resource_set in resource_sets:
            # A set is complete when it has at least one file and every
            # file's stored size matches its expected total size.
            if (
                resource_set.files_count > 0
                and resource_set.files_size == resource_set.files_total_size
            ):
                return resource_set
        return None

    def split_arch(self):
        """Return `architecture` split on '/' (i.e. [arch, subarch])."""
        return self.architecture.split("/")

    def get_next_version_name(self):
        """Return the version a `BootResourceSet` should use when adding to
        this resource.

        The version naming is specific to how the resource sets will be sorted
        by simplestreams. The version name is YYYYmmdd, with an optional
        revision index. (e.g. 20140822.1)

        This method gets the current date, and checks if a revision already
        exists in the database. If it doesn't then just the current date is
        returned. If it does exists then the next revision in the set for that
        date will be returned.

        :return: Name of version to use for a new set on this `BootResource`.
        :rtype: string
        """
        version_name = now().strftime("%Y%m%d")
        sets = self.sets.filter(version__startswith=version_name).order_by(
            "version"
        )
        if not sets.exists():
            return version_name
        # Sets already exist for today: find the highest revision index and
        # bump it by one. Unrevisioned sets (no '.') count as revision 0.
        max_idx = 0
        for resource_set in sets:
            if "." in resource_set.version:
                _, set_idx = resource_set.version.split(".")
                set_idx = int(set_idx)
                if set_idx > max_idx:
                    max_idx = set_idx
        return "%s.%d" % (version_name, max_idx + 1)

    def supports_subarch(self, subarch):
        """Return True if the resource supports the given subarch."""
        _, self_subarch = self.split_arch()
        if subarch == self_subarch:
            return True
        # Synced images may advertise additional subarches through a
        # comma-separated "subarches" entry in `extra`.
        if "subarches" not in self.extra:
            return False
        subarches = self.extra["subarches"].split(",")
        return subarch in subarches
Esempio n. 8
0
class ScriptResult(CleanSave, TimestampedModel):
    """The result of one `Script` run (or pending run) within a `ScriptSet`."""

    # Force model into the metadataserver namespace.
    class Meta(DefaultMeta):
        pass

    script_set = ForeignKey(ScriptSet, editable=False, on_delete=CASCADE)

    # All ScriptResults except commissioning scripts will be linked to a Script
    # as commissioning scripts are still embedded in the MAAS source.
    script = ForeignKey(Script,
                        editable=False,
                        blank=True,
                        null=True,
                        on_delete=CASCADE)

    # Any parameters set by MAAS or the user which should be passed to the
    # running script.
    parameters = JSONObjectField(blank=True, default={})

    # If the result is in reference to a particular block device link it.
    physical_blockdevice = ForeignKey(PhysicalBlockDevice,
                                      editable=False,
                                      blank=True,
                                      null=True,
                                      on_delete=CASCADE)

    # The exact text version of the script that ran; SET_NULL preserves the
    # result if that version is later deleted.
    script_version = ForeignKey(VersionedTextFile,
                                blank=True,
                                null=True,
                                editable=False,
                                on_delete=SET_NULL)

    status = IntegerField(choices=SCRIPT_STATUS_CHOICES,
                          default=SCRIPT_STATUS.PENDING)

    exit_status = IntegerField(blank=True, null=True)

    # Used by the builtin commissioning scripts and installation result. Also
    # stores the Script name incase the Script is deleted but the result isn't.
    script_name = CharField(max_length=255,
                            unique=False,
                            editable=False,
                            null=True)

    output = BinaryField(max_length=1024 * 1024, blank=True, default=b'')

    stdout = BinaryField(max_length=1024 * 1024, blank=True, default=b'')

    stderr = BinaryField(max_length=1024 * 1024, blank=True, default=b'')

    result = BinaryField(max_length=1024 * 1024, blank=True, default=b'')

    # When the script started to run
    started = DateTimeField(editable=False, null=True, blank=True)

    # When the script finished running
    ended = DateTimeField(editable=False, null=True, blank=True)

    @property
    def name(self):
        """Return the best-known script name, or "Unknown"."""
        if self.script is not None:
            return self.script.name
        elif self.script_name is not None:
            return self.script_name
        else:
            return "Unknown"

    @property
    def status_name(self):
        """Return the human-readable label for `status`."""
        return SCRIPT_STATUS_CHOICES[self.status][1]

    @property
    def runtime(self):
        """Return the run duration as a string (microseconds stripped), or
        '' when the script has not both started and ended."""
        if None not in (self.ended, self.started):
            runtime = self.ended - self.started
            return str(runtime - timedelta(microseconds=runtime.microseconds))
        else:
            return ''

    @property
    def starttime(self):
        """Return `started` as a POSIX timestamp, or '' if not started."""
        if self.started is not None:
            return self.started.timestamp()
        else:
            return ''

    @property
    def endtime(self):
        """Return `ended` as a POSIX timestamp, or '' if not ended."""
        if self.ended is not None:
            return self.ended.timestamp()
        else:
            return ''

    @property
    def estimated_runtime(self):
        """Return the actual runtime if finished, otherwise an estimate
        derived from previous passed runs, the script's timeout, or
        'Unknown'."""
        # If there is a runtime the script has completed, no need to calculate
        # an estimate.
        if self.runtime != '':
            return self.runtime
        runtime = None
        # Get an estimated runtime from previous runs.
        for script_result in self.history:
            # Only look at passed results when calculating an estimated
            # runtime. Failed results may take longer or shorter than
            # average. Don't use self.history.filter for this as the now
            # cached history list may be used elsewhere.
            if script_result.status != SCRIPT_STATUS.PASSED:
                continue
            # LP: #1730799 - Old results may not have started set.
            if script_result.started is None:
                script_result.started = script_result.ended
                script_result.save(update_fields=['started'])
            previous_runtime = script_result.ended - script_result.started
            if runtime is None:
                runtime = previous_runtime
            else:
                # Fold each additional run into a running average by
                # repeatedly halving the accumulated sum.
                runtime += previous_runtime
                runtime = runtime / 2
        if runtime is None:
            if self.script is not None and self.script.timeout != timedelta(0):
                # If there were no previous runs use the script's timeout.
                return str(self.script.timeout - timedelta(
                    microseconds=self.script.timeout.microseconds))
            else:
                return 'Unknown'
        else:
            return str(runtime - timedelta(microseconds=runtime.microseconds))

    def __str__(self):
        return "%s/%s" % (self.script_set.node.system_id, self.name)

    def read_results(self):
        """Read the results YAML file and validate it.

        :return: the parsed YAML as a dict; {} when no results were given.
        :raises ValidationError: when the YAML is malformed or does not
            match the expected structure.
        """
        try:
            parsed_yaml = yaml.safe_load(self.result)
        except yaml.YAMLError as err:
            raise ValidationError(err)

        if parsed_yaml is None:
            # No results were given.
            return {}
        elif not isinstance(parsed_yaml, dict):
            raise ValidationError('YAML must be a dictionary.')

        if parsed_yaml.get('status') not in [
                'passed', 'failed', 'degraded', 'timedout', None
        ]:
            raise ValidationError(
                'status must be "passed", "failed", "degraded", or '
                '"timedout".')

        results = parsed_yaml.get('results')
        if results is None:
            # Results are not defined.
            return parsed_yaml
        elif isinstance(results, dict):
            for key, value in results.items():
                if not isinstance(key, str):
                    raise ValidationError(
                        'All keys in the results dictionary must be strings.')

                # Scalars are accepted as a single-element list.
                if not isinstance(value, list):
                    value = [value]
                for i in value:
                    if type(i) not in [str, float, int, bool]:
                        raise ValidationError(
                            'All values in the results dictionary must be '
                            'a string, float, int, or bool.')
        else:
            raise ValidationError('results must be a dictionary.')

        return parsed_yaml

    def store_result(self,
                     exit_status=None,
                     output=None,
                     stdout=None,
                     stderr=None,
                     result=None,
                     script_version_id=None,
                     timedout=False):
        """Store uploaded script output and update `status`, then save.

        Any of output/stdout/stderr/result that are given replace the stored
        values. `result` is also parsed as YAML and its 'status' entry may
        override the status derived from `exit_status`/`timedout`. Builtin
        commissioning results additionally run their post-processing hook
        before the save.
        """
        # Don't allow ScriptResults to be overwritten unless the node is a
        # controller. Controllers are allowed to overwrite their results to
        # prevent new ScriptSets being created everytime a controller starts.
        # This also allows us to avoid creating an RPC call for the rack
        # controller to create a new ScriptSet.
        if not self.script_set.node.is_controller:
            # Allow PENDING, INSTALLING, and RUNNING scripts incase the node
            # didn't inform MAAS the Script was being run, it just uploaded
            # results.
            assert self.status in (SCRIPT_STATUS.PENDING,
                                   SCRIPT_STATUS.INSTALLING,
                                   SCRIPT_STATUS.RUNNING)
            assert self.output == b''
            assert self.stdout == b''
            assert self.stderr == b''
            assert self.result == b''
            assert self.script_version is None

        if timedout:
            self.status = SCRIPT_STATUS.TIMEDOUT
        elif exit_status is not None:
            self.exit_status = exit_status
            if exit_status == 0:
                self.status = SCRIPT_STATUS.PASSED
            elif self.status == SCRIPT_STATUS.INSTALLING:
                self.status = SCRIPT_STATUS.FAILED_INSTALLING
            else:
                self.status = SCRIPT_STATUS.FAILED

        if output is not None:
            self.output = Bin(output)
        if stdout is not None:
            self.stdout = Bin(stdout)
        if stderr is not None:
            self.stderr = Bin(stderr)
        if result is not None:
            self.result = Bin(result)
            # Invalid YAML is logged as a node event rather than raised, so
            # a bad upload still stores the raw result.
            try:
                parsed_yaml = self.read_results()
            except ValidationError as err:
                err_msg = (
                    "%s(%s) sent a script result with invalid YAML: %s" %
                    (self.script_set.node.fqdn, self.script_set.node.system_id,
                     err.message))
                logger.error(err_msg)
                Event.objects.create_node_event(
                    system_id=self.script_set.node.system_id,
                    event_type=EVENT_TYPES.SCRIPT_RESULT_ERROR,
                    event_description=err_msg)
            else:
                status = parsed_yaml.get('status')
                if status == 'passed':
                    self.status = SCRIPT_STATUS.PASSED
                elif status == 'failed':
                    self.status = SCRIPT_STATUS.FAILED
                elif status == 'degraded':
                    self.status = SCRIPT_STATUS.DEGRADED
                elif status == 'timedout':
                    self.status = SCRIPT_STATUS.TIMEDOUT

        if self.script:
            if script_version_id is not None:
                # Resolve the uploaded version id against the script's known
                # previous versions; unknown ids are logged as node events.
                for script in self.script.script.previous_versions():
                    if script.id == script_version_id:
                        self.script_version = script
                        break
                if self.script_version is None:
                    err_msg = (
                        "%s(%s) sent a script result for %s(%d) with an "
                        "unknown script version(%d)." %
                        (self.script_set.node.fqdn,
                         self.script_set.node.system_id, self.script.name,
                         self.script.id, script_version_id))
                    logger.error(err_msg)
                    Event.objects.create_node_event(
                        system_id=self.script_set.node.system_id,
                        event_type=EVENT_TYPES.SCRIPT_RESULT_ERROR,
                        event_description=err_msg)
            else:
                # If no script version was given assume the latest version
                # was run.
                self.script_version = self.script.script

        # If commissioning result check if its a builtin script, if so run its
        # hook before committing to the database.
        if (self.script_set.result_type == RESULT_TYPE.COMMISSIONING
                and self.name in NODE_INFO_SCRIPTS):
            post_process_hook = NODE_INFO_SCRIPTS[self.name]['hook']
            err = ("%s(%s): commissioning script '%s' failed during "
                   "post-processing." %
                   (self.script_set.node.fqdn, self.script_set.node.system_id,
                    self.name))
            # Circular imports.
            from metadataserver.api import try_or_log_event
            try_or_log_event(self.script_set.node,
                             None,
                             err,
                             post_process_hook,
                             node=self.script_set.node,
                             output=self.stdout,
                             exit_status=self.exit_status)
        self.save()

    @property
    def history(self):
        """Return previous runs of this script on the same node, newest
        first."""
        qs = ScriptResult.objects.filter(
            script_set__node_id=self.script_set.node_id)
        if self.script is not None:
            qs = qs.filter(script=self.script)
        else:
            qs = qs.filter(script_name=self.script_name)
        # XXX ltrager 2017-10-05 - Shows script runs from before MAAS supported
        # the hardware type or physical_blockdevice fields in history.
        # Solves LP: #1721524
        qs = qs.filter(
            Q(physical_blockdevice=self.physical_blockdevice)
            | Q(physical_blockdevice__isnull=True))
        qs = qs.order_by('-id')
        return qs

    def save(self, *args, **kwargs):
        """Stamp `started`/`ended` based on status transitions, then save.

        On first save, also promotes a physical_blockdevice passed via
        `parameters` into the dedicated field.
        """
        if self.started is None and self.status == SCRIPT_STATUS.RUNNING:
            self.started = datetime.now()
            if 'update_fields' in kwargs:
                kwargs['update_fields'].append('started')
        elif self.ended is None and self.status in {
                SCRIPT_STATUS.PASSED, SCRIPT_STATUS.FAILED,
                SCRIPT_STATUS.TIMEDOUT, SCRIPT_STATUS.ABORTED,
                SCRIPT_STATUS.DEGRADED, SCRIPT_STATUS.FAILED_INSTALLING
        }:
            self.ended = datetime.now()
            if 'update_fields' in kwargs:
                kwargs['update_fields'].append('ended')
            # LP: #1730799 - If a script is run quickly the POST telling MAAS
            # the script has started comes in after the POST telling MAAS the
            # result.
            if self.started is None:
                self.started = self.ended
                if 'update_fields' in kwargs:
                    kwargs['update_fields'].append('started')

        if self.id is None and self.physical_blockdevice is None:
            for param in self.parameters.values():
                if ('value' in param and isinstance(param['value'], dict)
                        and 'physical_blockdevice' in param['value']):
                    # Replace the object in the stored parameters with its id
                    # so the parameters stay JSON-serializable.
                    physical_blockdevice = param['value'].pop(
                        'physical_blockdevice')
                    self.physical_blockdevice = physical_blockdevice
                    param['value'][
                        'physical_blockdevice_id'] = physical_blockdevice.id

        return super().save(*args, **kwargs)
Esempio n. 9
0
 def test_form_field_is_a_plain_field(self):
     """The form field produced for JSONObjectField is a plain Field."""
     form_field = JSONObjectField().formfield()
     matcher = AfterPreprocessing(type, Is(forms.Field))
     self.assertThat(form_field, matcher)
Esempio n. 10
0
class ScriptResult(CleanSave, TimestampedModel):
    """The result of a single `Script` run against a node.

    Tracks the run's lifecycle status, captured output streams (combined
    output, stdout, stderr, and the YAML `result`), timing, and links to
    the owning `ScriptSet` plus any block device or interface the run
    targeted.
    """

    # Force model into the metadataserver namespace.
    class Meta(DefaultMeta):
        pass

    script_set = ForeignKey(ScriptSet, editable=False, on_delete=CASCADE)

    # All ScriptResults except commissioning scripts will be linked to a Script
    # as commissioning scripts are still embedded in the MAAS source.
    script = ForeignKey(Script,
                        editable=False,
                        blank=True,
                        null=True,
                        on_delete=CASCADE)

    # Any parameters set by MAAS or the user which should be passed to the
    # running script. Use a callable default so every instance gets its own
    # dict; a literal `{}` default is a single shared mutable object.
    parameters = JSONObjectField(blank=True, default=dict)

    # If the result is in reference to a particular block device link it.
    physical_blockdevice = ForeignKey(
        PhysicalBlockDevice,
        editable=False,
        blank=True,
        null=True,
        on_delete=CASCADE,
    )

    # If the result is in reference to a particular Interface link it.
    interface = ForeignKey(Interface,
                           editable=False,
                           blank=True,
                           null=True,
                           on_delete=CASCADE)

    # The exact version of the script text that produced this result.
    script_version = ForeignKey(
        VersionedTextFile,
        blank=True,
        null=True,
        editable=False,
        on_delete=SET_NULL,
    )

    status = IntegerField(choices=SCRIPT_STATUS_CHOICES,
                          default=SCRIPT_STATUS.PENDING)

    exit_status = IntegerField(blank=True, null=True)

    # Used by the builtin commissioning scripts and installation result. Also
    # stores the Script name incase the Script is deleted but the result isn't.
    script_name = CharField(max_length=255,
                            unique=False,
                            editable=False,
                            null=True)

    # Combined output stream of the script run.
    output = BinaryField(max_length=1024 * 1024, blank=True, default=b"")

    stdout = BinaryField(max_length=1024 * 1024, blank=True, default=b"")

    stderr = BinaryField(max_length=1024 * 1024, blank=True, default=b"")

    # Raw YAML results file as uploaded; parsed by read_results().
    result = BinaryField(max_length=1024 * 1024, blank=True, default=b"")

    # When the script started to run
    started = DateTimeField(editable=False, null=True, blank=True)

    # When the script finished running
    ended = DateTimeField(editable=False, null=True, blank=True)

    # Whether or not the failed script result should be suppressed.
    suppressed = BooleanField(default=False)

    @property
    def name(self):
        """Name of the script, falling back to the stored `script_name`."""
        if self.script is not None:
            return self.script.name
        elif self.script_name is not None:
            return self.script_name
        else:
            return "Unknown"

    @property
    def status_name(self):
        """Human readable name of the current status."""
        # Look the label up by status *value* rather than by list index;
        # positional indexing is only correct while the status values
        # happen to be contiguous from zero.
        return dict(SCRIPT_STATUS_CHOICES)[self.status]

    @property
    def runtime(self):
        """Elapsed run time as a string; '' if not started and finished."""
        if None not in (self.ended, self.started):
            runtime = self.ended - self.started
            # Drop sub-second precision for display.
            return str(runtime - timedelta(microseconds=runtime.microseconds))
        else:
            return ""

    @property
    def starttime(self):
        """Start time as a POSIX timestamp; '' when never started."""
        if self.started is not None:
            return self.started.timestamp()
        else:
            return ""

    @property
    def endtime(self):
        """End time as a POSIX timestamp; '' when never finished."""
        if self.ended is not None:
            return self.ended.timestamp()
        else:
            return ""

    @property
    def estimated_runtime(self):
        """Estimate this run's duration from previous passed runs."""
        # If there is a runtime the script has completed, no need to calculate
        # an estimate.
        if self.runtime != "":
            return self.runtime
        runtime = None
        # Get an estimated runtime from previous runs.
        for script_result in self.history.only(
                "status",
                "started",
                "ended",
                "script_id",
                "script_name",
                "script_set_id",
                "physical_blockdevice_id",
                "created",
        ):
            # Only look at passed results when calculating an estimated
            # runtime. Failed results may take longer or shorter than
            # average. Don't use self.history.filter for this as the now
            # cached history list may be used elsewhere.
            if script_result.status != SCRIPT_STATUS.PASSED:
                continue
            # LP: #1730799 - Old results may not have started set.
            if script_result.started is None:
                script_result.started = script_result.ended
                script_result.save(update_fields=["started"])
            previous_runtime = script_result.ended - script_result.started
            if runtime is None:
                runtime = previous_runtime
            else:
                # Rolling average of the runtimes seen so far.
                runtime += previous_runtime
                runtime = runtime / 2
        if runtime is None:
            if self.script is not None and self.script.timeout != timedelta(0):
                # If there were no previous runs use the script's timeout.
                return str(self.script.timeout - timedelta(
                    microseconds=self.script.timeout.microseconds))
            else:
                return "Unknown"
        else:
            return str(runtime - timedelta(microseconds=runtime.microseconds))

    def __str__(self):
        return "%s/%s" % (self.script_set.node.system_id, self.name)

    def read_results(self):
        """Read the results YAML file and validate it.

        :return: the parsed YAML as a dict ({} when no results were given).
        :raises ValidationError: on malformed YAML or an invalid schema.
        """
        try:
            parsed_yaml = yaml.safe_load(self.result)
        except yaml.YAMLError as err:
            raise ValidationError(err)

        if parsed_yaml is None:
            # No results were given.
            return {}
        elif not isinstance(parsed_yaml, dict):
            raise ValidationError("YAML must be a dictionary.")

        if parsed_yaml.get("status") not in [
                "passed",
                "failed",
                "degraded",
                "timedout",
                "skipped",
                None,
        ]:
            raise ValidationError(
                'status must be "passed", "failed", "degraded", '
                '"timedout", or "skipped".')

        link_connected = parsed_yaml.get("link_connected")
        if link_connected is not None:
            if not self.interface:
                raise ValidationError(
                    "link_connected may only be specified if the Script "
                    "accepts an interface parameter.")
            if not isinstance(link_connected, bool):
                raise ValidationError("link_connected must be a boolean")

        results = parsed_yaml.get("results")
        if results is None:
            # Results are not defined.
            return parsed_yaml
        elif isinstance(results, dict):
            for key, value in results.items():
                if not isinstance(key, str):
                    raise ValidationError(
                        "All keys in the results dictionary must be strings.")

                # Scalars are accepted as a single-element list.
                if not isinstance(value, list):
                    value = [value]
                for i in value:
                    if type(i) not in [str, float, int, bool]:
                        raise ValidationError(
                            "All values in the results dictionary must be "
                            "a string, float, int, or bool.")
        else:
            raise ValidationError("results must be a dictionary.")

        return parsed_yaml

    def store_result(
        self,
        exit_status=None,
        output=None,
        stdout=None,
        stderr=None,
        result=None,
        script_version_id=None,
        timedout=False,
    ):
        """Store uploaded results, derive the status, and save.

        Updates output streams and exit status, parses the YAML result
        (logging and recording an event on invalid YAML), resolves the
        script version, runs the builtin commissioning post-process hook
        when applicable, and may reset NODE_INFO scripts for
        recommissioning.
        """
        # Controllers and Pods are allowed to overwrite their results during any status
        # to prevent new ScriptSets being created everytime a controller
        # starts. This also allows us to avoid creating an RPC call for the
        # rack controller to create a new ScriptSet.
        if (not self.script_set.node.is_controller
                and not self.script_set.node.is_pod):
            # Allow PENDING, APPLYING_NETCONF, INSTALLING, and RUNNING scripts
            # incase the node didn't inform MAAS the Script was being run, it
            # just uploaded results.
            assert self.status in SCRIPT_STATUS_RUNNING_OR_PENDING

        if timedout:
            self.status = SCRIPT_STATUS.TIMEDOUT
        elif exit_status is not None:
            self.exit_status = exit_status
            if exit_status == 0:
                self.status = SCRIPT_STATUS.PASSED
            elif self.status == SCRIPT_STATUS.INSTALLING:
                self.status = SCRIPT_STATUS.FAILED_INSTALLING
            elif self.status == SCRIPT_STATUS.APPLYING_NETCONF:
                self.status = SCRIPT_STATUS.FAILED_APPLYING_NETCONF
            else:
                self.status = SCRIPT_STATUS.FAILED

        if output is not None:
            self.output = Bin(output)
        if stdout is not None:
            self.stdout = Bin(stdout)
        if stderr is not None:
            self.stderr = Bin(stderr)
        if result is not None:
            self.result = Bin(result)
            try:
                parsed_yaml = self.read_results()
            except ValidationError as err:
                err_msg = (
                    "%s(%s) sent a script result with invalid YAML: %s" % (
                        self.script_set.node.fqdn,
                        self.script_set.node.system_id,
                        err.message,
                    ))
                logger.error(err_msg)
                Event.objects.create_node_event(
                    system_id=self.script_set.node.system_id,
                    event_type=EVENT_TYPES.SCRIPT_RESULT_ERROR,
                    event_description=err_msg,
                )
            else:
                # A status given in the YAML result overrides the one
                # derived from the exit status above.
                status = parsed_yaml.get("status")
                if status == "passed":
                    self.status = SCRIPT_STATUS.PASSED
                elif status == "failed":
                    self.status = SCRIPT_STATUS.FAILED
                elif status == "degraded":
                    self.status = SCRIPT_STATUS.DEGRADED
                elif status == "timedout":
                    self.status = SCRIPT_STATUS.TIMEDOUT
                elif status == "skipped":
                    self.status = SCRIPT_STATUS.SKIPPED

                link_connected = parsed_yaml.get("link_connected")
                if self.interface and isinstance(link_connected, bool):
                    self.interface.link_connected = link_connected
                    self.interface.save(update_fields=["link_connected"])

        if self.script:
            if script_version_id is not None:
                for script in self.script.script.previous_versions():
                    if script.id == script_version_id:
                        self.script_version = script
                        break
                if self.script_version is None:
                    err_msg = (
                        "%s(%s) sent a script result for %s(%d) with an "
                        "unknown script version(%d)." % (
                            self.script_set.node.fqdn,
                            self.script_set.node.system_id,
                            self.script.name,
                            self.script.id,
                            script_version_id,
                        ))
                    logger.error(err_msg)
                    Event.objects.create_node_event(
                        system_id=self.script_set.node.system_id,
                        event_type=EVENT_TYPES.SCRIPT_RESULT_ERROR,
                        event_description=err_msg,
                    )
            else:
                # If no script version was given assume the latest version
                # was run.
                self.script_version = self.script.script

        # If commissioning result check if its a builtin script, if so run its
        # hook before committing to the database.
        if (self.script_set.result_type == RESULT_TYPE.COMMISSIONING
                and self.name in NODE_INFO_SCRIPTS and stdout is not None):
            post_process_hook = NODE_INFO_SCRIPTS[self.name]["hook"]
            err = ("%s(%s): commissioning script '%s' failed during "
                   "post-processing." % (
                       self.script_set.node.fqdn,
                       self.script_set.node.system_id,
                       self.name,
                   ))
            # Circular imports.
            from metadataserver.api import try_or_log_event

            signal_status = try_or_log_event(
                self.script_set.node,
                None,
                err,
                post_process_hook,
                node=self.script_set.node,
                output=self.stdout,
                exit_status=self.exit_status,
            )
            # If the script failed to process mark the script as failed to
            # prevent testing from running and help users identify where
            # the error came from. This can happen when a commissioning
            # script generated invalid output.
            if signal_status is not None:
                self.status = SCRIPT_STATUS.FAILED

        # A passed commissioning script flagged for recommission resets
        # all builtin NODE_INFO scripts back to PENDING.
        if (self.status == SCRIPT_STATUS.PASSED and self.script
                and self.script.script_type == SCRIPT_TYPE.COMMISSIONING
                and self.script.recommission):
            self.script_set.scriptresult_set.filter(
                script_name__in=NODE_INFO_SCRIPTS).update(
                    status=SCRIPT_STATUS.PENDING,
                    started=None,
                    ended=None,
                    updated=now(),
                )

        self.save()

    @property
    def history(self):
        """Previous results of this script on the same node, newest first."""
        qs = ScriptResult.objects.filter(
            script_set__node_id=self.script_set.node_id)
        if self.script is not None:
            qs = qs.filter(script=self.script)
        else:
            qs = qs.filter(script_name=self.script_name)
        # XXX ltrager 2017-10-05 - Shows script runs from before MAAS supported
        # the hardware type or physical_blockdevice fields in history.
        # Solves LP: #1721524
        qs = qs.filter(
            Q(physical_blockdevice=self.physical_blockdevice)
            | Q(physical_blockdevice__isnull=True))
        qs = qs.order_by("-id")
        return qs

    def save(self, *args, **kwargs):
        """Stamp timing fields, link parameter devices, then save."""
        # First transition into RUNNING: record the start time.
        if self.started is None and self.status == SCRIPT_STATUS.RUNNING:
            self.started = datetime.now()
            # Keep a caller-restricted save consistent with our change.
            if "update_fields" in kwargs:
                kwargs["update_fields"].append("started")
        # First transition into any terminal status: record the end time.
        elif self.ended is None and self.status not in (
                SCRIPT_STATUS_RUNNING_OR_PENDING):
            self.ended = datetime.now()
            if "update_fields" in kwargs:
                kwargs["update_fields"].append("ended")
            # LP: #1730799 - If a script is run quickly the POST telling MAAS
            # the script has started comes in after the POST telling MAAS the
            # result.
            if self.started is None:
                self.started = self.ended
                if "update_fields" in kwargs:
                    kwargs["update_fields"].append("started")

        if self.id is None:
            # On first save, move any model objects passed in the script
            # parameters onto their dedicated fields, keeping only the
            # object's id in the (JSON) parameters.
            purge_unlinked_blockdevice = False
            purge_unlinked_interface = False
            for param in self.parameters.values():
                if "value" in param and isinstance(param["value"], dict):
                    if "physical_blockdevice" in param["value"]:
                        self.physical_blockdevice = param["value"].pop(
                            "physical_blockdevice")
                        param["value"][
                            "physical_blockdevice_id"] = self.physical_blockdevice.id
                        purge_unlinked_blockdevice = True
                    elif "interface" in param["value"]:
                        self.interface = param["value"].pop("interface")
                        param["value"]["interface_id"] = self.interface.id
                        purge_unlinked_interface = True
            if purge_unlinked_blockdevice or purge_unlinked_interface:
                # Cleanup previous ScriptResults which failed to map to a
                # required device in a previous run. This may happen due to an
                # issue during commissioning such as not finding devices.
                qs = ScriptResult.objects.filter(
                    script=self.script, script_set__node=self.script_set.node)
                # Exclude passed results as they must have been from a previous
                # version of the script which did not require parameters. 2.7
                # adds interface support and the internet-connectivity test
                # has been extended to support interface parameters.
                qs = qs.exclude(status=SCRIPT_STATUS.PASSED)
                if purge_unlinked_blockdevice:
                    qs = qs.filter(physical_blockdevice=None)
                if purge_unlinked_interface:
                    qs = qs.filter(interface=None)
                qs.delete()

        return super().save(*args, **kwargs)
Esempio n. 11
0
class Node(CleanSave, TimestampedModel):
    """A `Node` represents a physical machine used by the MAAS Server.

    :ivar system_id: The unique identifier for this `Node`.
        (e.g. 'node-41eba45e-4cfa-11e1-a052-00225f89f211').
    :ivar hostname: This `Node`'s hostname.
    :ivar status: This `Node`'s status. See the vocabulary
        :class:`NODE_STATUS`.
    :ivar owner: This `Node`'s owner if it's in use, None otherwise.
    :ivar after_commissioning_action: The action to perform after
        commissioning. See vocabulary
        :class:`NODE_AFTER_COMMISSIONING_ACTION`.
    :ivar power_type: The :class:`POWER_TYPE` that determines how this
        node will be powered on.  If not given, the default will be used as
        configured in the `node_power_type` setting.
    :ivar nodegroup: The `NodeGroup` this `Node` belongs to.
    :ivar tags: The list of :class:`Tag`s associated with this `Node`.
    :ivar objects: The :class:`NodeManager`.

    """
    class Meta(DefaultMeta):
        """Needed for South to recognize this model."""

    # Unique, stable identifier generated at creation time
    # (e.g. 'node-41eba45e-4cfa-11e1-a052-00225f89f211').
    system_id = CharField(max_length=41,
                          unique=True,
                          default=generate_node_system_id,
                          editable=False)

    # Unique hostname; may be blank until one is assigned (see
    # set_random_hostname).
    hostname = CharField(max_length=255, default='', blank=True, unique=True)

    # Current state in the node lifecycle; transitions are validated
    # against NODE_TRANSITIONS in clean_status().
    status = IntegerField(max_length=10,
                          choices=NODE_STATUS_CHOICES,
                          editable=False,
                          default=NODE_STATUS.DEFAULT_STATUS)

    # The user this node is allocated to; None when unowned.
    owner = ForeignKey(User,
                       default=None,
                       blank=True,
                       null=True,
                       editable=False)

    # Action to perform once commissioning completes.
    after_commissioning_action = IntegerField(
        choices=NODE_AFTER_COMMISSIONING_ACTION_CHOICES,
        default=NODE_AFTER_COMMISSIONING_ACTION.DEFAULT)

    # Distro series to install; '' means use the configured default
    # (see get_distro_series).
    distro_series = CharField(max_length=20,
                              choices=DISTRO_SERIES_CHOICES,
                              null=True,
                              blank=True,
                              default='')

    architecture = CharField(max_length=31,
                             choices=ARCHITECTURE_CHOICES,
                             blank=False,
                             default=ARCHITECTURE.i386)

    # Array of MAC addresses ('macaddr' column type) — presumably the
    # routers observed next to this node; confirm against the writer.
    routers = djorm_pgarray.fields.ArrayField(dbtype="macaddr")

    agent_name = CharField(max_length=255, default='', blank=True, null=True)

    # Optional availability zone, referenced by name.
    zone = ForeignKey(Zone,
                      to_field='name',
                      verbose_name="Availability zone",
                      default=None,
                      blank=True,
                      null=True,
                      editable=True,
                      db_index=True)

    # Juju expects the following standard constraints, which are stored here
    # as a basic optimisation over querying the lshw output.
    cpu_count = IntegerField(default=0)
    memory = IntegerField(default=0)
    storage = IntegerField(default=0)

    # For strings, Django insists on abusing the empty string ("blank")
    # to mean "none."
    power_type = CharField(max_length=10,
                           choices=POWER_TYPE_CHOICES,
                           null=False,
                           blank=True,
                           default=POWER_TYPE.DEFAULT)

    # JSON-encoded set of parameters for power control.
    power_parameters = JSONObjectField(blank=True, default="")

    # NOTE(review): presumably the OAuth token of the allocating user;
    # confirm against NodeManager.
    token = ForeignKey(Token,
                       db_index=True,
                       null=True,
                       editable=False,
                       unique=False)

    # Last error message recorded for this node.
    error = CharField(max_length=255, blank=True, default='')

    # Whether network booting is enabled for this node.
    netboot = BooleanField(default=True)

    license_key = CharField(max_length=30, null=True, blank=True)
    # This field can't be null, but we can't enforce that in the
    # database schema because we can only create the default value from
    # a complete schema, after schema migration.  We can't use custom
    # model validation either, because the node forms need to set their
    # default values *after* saving the form (with commit=False), which
    # incurs validation before the default values are set.
    # So all we can do is set blank=False, and make the field editable
    # to cajole Django out of skipping it during "model" (actually model
    # form) validation.
    nodegroup = ForeignKey('maasserver.NodeGroup',
                           editable=True,
                           null=True,
                           blank=False)

    tags = ManyToManyField(Tag)

    objects = NodeManager()

    def __unicode__(self):
        """Text form: the system_id, plus the FQDN when a hostname is set."""
        if not self.hostname:
            return self.system_id
        return "%s (%s)" % (self.system_id, self.fqdn)

    @property
    def fqdn(self):
        """Fully qualified domain name for this node.

        When MAAS manages DNS for this node, any domain part of the
        hostname is replaced by the domain configured on the cluster
        controller; otherwise the hostname is returned unchanged.
        """
        # Avoid circular imports.
        from maasserver.dns import is_dns_managed
        if not is_dns_managed(self.nodegroup):
            return self.hostname
        # Strip any existing domain from the hostname, then use the
        # nodegroup's name as the domain name.
        return '%s.%s' % (strip_domain(self.hostname), self.nodegroup.name)

    def ip_addresses(self):
        """IP addresses allocated to this node."""
        # Gather this node's MAC addresses, then map them to DHCP leases.
        macs = [mac.mac_address for mac in self.macaddress_set.all()]
        dhcpleases_qs = self.nodegroup.dhcplease_set.all()
        if dhcpleases_qs._result_cache is not None:
            # If the dhcp lease set has been pre-fetched: use it to
            # extract the IP addresses associated with the nodes' MAC
            # addresses.
            return [lease.ip for lease in dhcpleases_qs if lease.mac in macs]
        else:
            # Not pre-fetched: filter in the database instead of loading
            # every lease into memory.
            query = dhcpleases_qs.filter(mac__in=macs)
            return query.values_list('ip', flat=True)

    def tag_names(self):
        """Return the names of all tags attached to this node."""
        # self.tags.values_list would bypass Django's prefetch cache, so
        # walk the (possibly cached) tag objects instead.
        names = []
        for tag in self.tags.all():
            names.append(tag.name)
        return names

    def clean_status(self):
        """Check a node's status transition against the node-status FSM."""
        old_status = get_db_state(self, 'status')
        # Staying in the same state is always a safe transition.
        if self.status == old_status:
            return
        # Otherwise the move must appear in the transition table.
        if self.status in NODE_TRANSITIONS.get(old_status, ()):
            return
        # Transition not permitted.
        raise NodeStateViolation(
            "Invalid transition: %s -> %s." % (
                NODE_STATUS_CHOICES_DICT.get(old_status, "Unknown"),
                NODE_STATUS_CHOICES_DICT.get(self.status, "Unknown"),
            ))

    def clean(self, *args, **kwargs):
        """Model validation hook; also enforces the node-status FSM."""
        super(Node, self).clean(*args, **kwargs)
        self.clean_status()

    def display_status(self):
        """Return status text as displayed to the user.

        The UI representation is taken from NODE_STATUS_CHOICES_DICT and may
        interpolate the variable "owner" to reflect the username of the node's
        current owner, if any.
        """
        status_text = NODE_STATUS_CHOICES_DICT[self.status]
        if self.status != NODE_STATUS.ALLOCATED:
            return status_text
        # The User is represented as its username in interpolation.
        # Don't just say self.owner.username here, or there will be
        # trouble with unowned nodes!
        return "%s to %s" % (status_text, self.owner)

    def add_mac_address(self, mac_address):
        """Add a new MAC address to this `Node`.

        :param mac_address: The MAC address to be added.
        :type mac_address: unicode
        :return: The newly created :class:`MACAddress`.
        :raises: django.core.exceptions.ValidationError_

        .. _django.core.exceptions.ValidationError: https://
           docs.djangoproject.com/en/dev/ref/exceptions/
           #django.core.exceptions.ValidationError
        """
        # Avoid circular imports
        from maasserver.models import MACAddress

        # Saving triggers model validation, which may raise.
        mac = MACAddress(mac_address=mac_address, node=self)
        mac.save()
        return mac

    def remove_mac_address(self, mac_address):
        """Remove a MAC address from this `Node`.

        :param mac_address: The MAC address to be removed.
        :type mac_address: string

        """
        # Avoid circular imports
        from maasserver.models import MACAddress

        # NOTE(review): Model.objects.get raises DoesNotExist when no row
        # matches, so the `if mac:` guard below can never see a falsy
        # value — confirm whether a missing MAC should raise or be a no-op.
        mac = MACAddress.objects.get(mac_address=mac_address, node=self)
        if mac:
            mac.delete()

    def accept_enlistment(self, user):
        """Accept this node's (anonymous) enlistment.

        This call makes sense only on a node in Declared state, i.e. one that
        has been anonymously enlisted and is now waiting for a MAAS user to
        accept that enlistment as authentic.  Calling it on a node that is in
        Ready or Commissioning state, however, is not an error -- it probably
        just means that somebody else has beaten you to it.

        :return: This node if it has made the transition from Declared, or
            None if it was already in an accepted state.
        """
        # Already accepted (by someone else, most likely): nothing to do.
        if self.status in (NODE_STATUS.READY, NODE_STATUS.COMMISSIONING):
            return None
        # Anything other than Declared at this point is a state violation.
        if self.status != NODE_STATUS.DECLARED:
            raise NodeStateViolation(
                "Cannot accept node enlistment: node %s is in state %s." %
                (self.system_id, NODE_STATUS_CHOICES_DICT[self.status]))

        self.start_commissioning(user)
        return self

    def start_commissioning(self, user):
        """Install OS and self-test a new node.

        :param user: The user requesting commissioning; passed through to
            `Node.objects.start_nodes`.
        """
        # Avoid circular imports.
        from metadataserver.commissioning.user_data import generate_user_data
        from metadataserver.models import NodeCommissionResult

        commissioning_user_data = generate_user_data(nodegroup=self.nodegroup)
        # Discard results from any previous commissioning run first.
        NodeCommissionResult.objects.clear_results(self)
        self.status = NODE_STATUS.COMMISSIONING
        self.save()
        # The commissioning profile is handled in start_nodes.
        Node.objects.start_nodes([self.system_id],
                                 user,
                                 user_data=commissioning_user_data)

    def delete(self):
        """Delete this node, cleaning up DHCP host maps and MAC addresses.

        :raises NodeStateViolation: if the node is currently allocated.
        """
        # Allocated nodes can't be deleted.
        if self.status == NODE_STATUS.ALLOCATED:
            raise NodeStateViolation(
                "Cannot delete node %s: node is in state %s." %
                (self.system_id, NODE_STATUS_CHOICES_DICT[self.status]))
        nodegroup = self.nodegroup
        if nodegroup.get_managed_interface() is not None:
            # Delete the host map(s) in the DHCP server.
            macs = self.macaddress_set.values_list('mac_address', flat=True)
            leases = DHCPLease.objects.filter(mac__in=macs,
                                              nodegroup=nodegroup)
            for lease in leases:
                task_kwargs = dict(ip_address=lease.ip,
                                   server_address="127.0.0.1",
                                   omapi_key=nodegroup.dhcp_key)
                # Dispatch asynchronously to this nodegroup's task queue.
                remove_dhcp_host_map.apply_async(queue=nodegroup.uuid,
                                                 kwargs=task_kwargs)
        # Delete the related mac addresses.
        # The DHCPLease objects corresponding to these MACs will be deleted
        # as well. See maasserver/models/dhcplease:delete_lease().
        self.macaddress_set.all().delete()

        super(Node, self).delete()

    def set_random_hostname(self):
        """Set 5 character `hostname` using non-ambiguous characters.

        Using 5 letters from the set 'abcdefghjkmnpqrtwxy346789' we get
        9,765,625 combinations (pow(25, 5)).

        Note that having a hostname starting with a number is perfectly
        valid, see
        http://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names
        """
        domain = Config.objects.get_config("enlistment_domain")
        domain = domain.strip("." + whitespace)
        # Keep generating candidates until one saves without a
        # ValidationError (e.g. a uniqueness clash).
        saved = False
        while not saved:
            candidate = generate_hostname(5)
            if domain:
                self.hostname = "%s.%s" % (candidate, domain)
            else:
                self.hostname = "%s" % candidate
            try:
                self.save()
            except ValidationError:
                continue
            saved = True

    def get_effective_power_type(self):
        """Get power-type to use for this node.

        If no power type has been set for the node, get the configured
        default.

        :raises ValueError: if both the node and the global default are
            still set to the placeholder default power type.
        """
        # A node-specific setting always wins.
        if self.power_type != POWER_TYPE.DEFAULT:
            return self.power_type
        power_type = Config.objects.get_config('node_power_type')
        if power_type == POWER_TYPE.DEFAULT:
            raise ValueError(
                "Node power type is set to the default, but "
                "the default is not yet configured.  The default "
                "needs to be configured to another, more useful value.")
        return power_type

    def get_primary_mac(self):
        """Return the primary :class:`MACAddress` for this node.

        The earliest-created MAC address is considered primary; returns
        None when the node has no MAC addresses at all.
        """
        earliest = list(self.macaddress_set.order_by('created')[:1])
        return earliest[0] if earliest else None

    def get_effective_kernel_options(self):
        """Determine any special kernel parameters for this node.

        :return: (tag, kernel_options)
            tag is a Tag object or None. If None, the kernel_options came from
            the global setting.
            kernel_options, a string indicating extra kernel_options that
            should be used when booting this node. May be None if no tags match
            and no global setting has been configured.
        """
        # A tag carrying a custom kernel parameter wins over the global
        # setting; ties between tags are broken alphabetically by name.
        tagged = self.tags.filter(kernel_opts__isnull=False).order_by('name')
        for tag in tagged:
            if tag.kernel_opts != '':
                return tag, tag.kernel_opts
        return None, Config.objects.get_config('kernel_opts')

    @property
    def work_queue(self):
        """The name of the queue for tasks specific to this node.

        Task routing is per cluster; this simply delegates to the node's
        `NodeGroup`.
        """
        return self.nodegroup.work_queue

    def get_distro_series(self):
        """Return the distro series that should be installed on this node.

        Falls back to the site-wide default when the node has no series set,
        or when it explicitly uses the default marker.
        """
        series = self.distro_series
        if series and series != DISTRO_SERIES.default:
            return series
        return Config.objects.get_config('default_distro_series')

    def set_distro_series(self, series=''):
        """Record the distro series to install on this node and persist it.

        :param series: The series name; an empty string clears the choice.
        """
        self.distro_series = series
        self.save()

    def get_effective_power_parameters(self):
        """Return effective power parameters, including any defaults.

        Starts from the node's own ``power_parameters`` (if any) and fills
        in a default value for every well-known key that is missing.
        """
        # An empty power_parameters comes out of the DB as an empty unicode
        # string, so a plain truthiness check covers both cases.
        params = dict(self.power_parameters) if self.power_parameters else {}

        defaults = {
            'system_id': self.system_id,
            'virsh': '/usr/bin/virsh',
            'vmware': '/usr/bin/vmware_cli',
            'fence_cdu': '/usr/sbin/fence_cdu',
            'ipmipower': '/usr/sbin/ipmipower',
            'ipmitool': '/usr/bin/ipmitool',
            'ipmi_chassis_config': '/usr/sbin/ipmi-chassis-config',
            'ipmi_config': 'ipmi.conf',
            'power_address': 'qemu://localhost/system',
            'username': '',
            'power_id': self.system_id,
            'power_driver': '',
        }
        for key, value in defaults.items():
            params.setdefault(key, value)

        # The "mac" parameter defaults to the node's primary MAC address,
        # but only if no power parameters were set at all.
        if not self.power_parameters:
            primary_mac = self.get_primary_mac()
            if primary_mac is not None:
                params['mac_address'] = primary_mac.mac_address
        return params

    def acquire(self, user, token=None, agent_name=''):
        """Mark this commissioned node as allocated to `user`.

        :param user: The new owner.
        :param token: Optional auth token; when given it must belong to
            `user`.
        :param agent_name: Name of the agent performing the allocation.
        """
        # Sanity checks: the node must be unowned, and any token supplied
        # must belong to the acquiring user.
        assert self.owner is None
        assert token is None or token.user == user
        self.status = NODE_STATUS.ALLOCATED
        self.owner = user
        self.token = token
        self.agent_name = agent_name
        self.save()

    def release(self):
        """Return this node to the pool of available nodes.

        Powers the node off, clears its ownership information and re-enables
        netboot so it PXE-boots next time it starts.
        """
        Node.objects.stop_nodes([self.system_id], self.owner)
        self.status = NODE_STATUS.READY
        self.owner = None
        self.token = None
        self.agent_name = ''
        self.set_netboot()
        self.save()

    def set_netboot(self, on=True):
        """Switch network booting for this node on or off and persist it."""
        self.netboot = on
        self.save()

    def should_use_traditional_installer(self):
        """Should this node be installed with the traditional installer?

        The traditional installer is the default: it is used exactly when
        the `use-fastpath-installer` tag is absent.
        """
        return not self.should_use_fastpath_installer()

    def should_use_fastpath_installer(self):
        """Should this node be installed with the Fast Path installer?

        True exactly when the `use-fastpath-installer` tag is attached to
        this node; False otherwise (the traditional installer is the
        default).
        """
        fastpath_tags = self.tags.filter(name="use-fastpath-installer")
        return fastpath_tags.exists()

    def use_traditional_installer(self):
        """Set this node to be installed with the traditional installer.

        By default, nodes are installed with the traditional installer, so
        this simply removes the `use-fastpath-installer` tag if present.

        :raises: :class:`RuntimeError` when the `use-fastpath-installer`
            tag is defined *with* an expression. The reason is that the tag
            evaluation machinery will eventually ignore whatever changes you
            make with this method.
        """
        uti_tag, _ = Tag.objects.get_or_create(name="use-fastpath-installer")
        if uti_tag.is_defined:
            raise RuntimeError(
                "The use-fastpath-installer tag is defined with an "
                "expression. This expression must be updated to prevent "
                "this node from booting with the Fast Path installer.")
        self.tags.remove(uti_tag)

    def use_fastpath_installer(self):
        """Set this node to be installed with the Fast Path Installer.

        Adds the `use-fastpath-installer` tag to the node, creating the tag
        if necessary.

        :raises: :class:`RuntimeError` when the `use-fastpath-installer`
            tag is defined *with* an expression. The reason is that the tag
            evaluation machinery will eventually ignore whatever changes you
            make with this method.
        """
        tag, _ = Tag.objects.get_or_create(name="use-fastpath-installer")
        if tag.is_defined:
            raise RuntimeError(
                "The use-fastpath-installer tag is defined with an "
                "expression. This expression must be updated to make this "
                "node boot with the Fast Path Installer.")
        self.tags.add(tag)
Esempio n. 12
0
class Script(CleanSave, TimestampedModel):
    """A script that can be run against a node.

    Describes what the script does, which hardware it targets, how it may
    be scheduled (parallelism, timeout, reboots) and links to the script
    text itself (a `VersionedTextFile`).
    """

    # Force model into the metadataserver namespace.
    class Meta(DefaultMeta):
        pass

    objects = ScriptManager()

    name = CharField(max_length=255, unique=True)

    title = CharField(max_length=255, blank=True)

    description = TextField(blank=True)

    tags = ArrayField(TextField(), blank=True, null=True, default=list)

    script_type = IntegerField(choices=SCRIPT_TYPE_CHOICES,
                               default=SCRIPT_TYPE.TESTING)

    # The hardware the script configures or tests.
    hardware_type = IntegerField(choices=HARDWARE_TYPE_CHOICES,
                                 default=HARDWARE_TYPE.NODE)

    # Whether the script can run in parallel with other scripts.
    parallel = IntegerField(choices=SCRIPT_PARALLEL_CHOICES,
                            default=SCRIPT_PARALLEL.DISABLED)

    # Any results which will be made available after the script is run.
    # Use the `dict` callable (not a `{}` literal) so rows never share one
    # mutable default object; matches the convention used by other models
    # in this file (e.g. Notification.context).
    results = JSONObjectField(blank=True, default=dict)

    # Parameters which may be passed to the script and their constraints.
    parameters = JSONObjectField(blank=True, default=dict)

    # apt, snap, dpkg, to install or archives to extract.
    packages = JSONObjectField(blank=True, default=dict)

    # 0 is no timeout.
    timeout = DurationField(default=datetime.timedelta())

    destructive = BooleanField(default=False)

    # True only if the script is shipped with MAAS.
    default = BooleanField(default=False)

    script = OneToOneField(VersionedTextFile, on_delete=CASCADE)

    # A list of hardware identifiers (modalias, PCI id, USB id, or name) this
    # script is applicable to. This script will always run on machines with
    # matching hardware.
    for_hardware = ArrayField(CharField(max_length=255),
                              blank=True,
                              default=list)

    # Whether or not the script may reboot while running. Tells the status
    # monitor to wait until NODE_FAILURE_MONITORED_STATUS_TIMEOUTS before
    # timing out.
    may_reboot = BooleanField(default=False)

    # Only applicable to commissioning scripts. When true reruns commissioning
    # scripts after receiving the result.
    recommission = BooleanField(default=False)

    @property
    def ForHardware(self):
        """Parse the `for_hardware` field into a ForHardware tuple.

        Each descriptor is expected to look like ``<type>:<value>``;
        descriptors without a colon, or with an unrecognised type, are
        silently ignored.
        """
        modaliases = []
        pci = []
        usb = []
        for descriptor in self.for_hardware:
            try:
                hwtype, value = descriptor.split(':', 1)
            except ValueError:
                # No "type:" prefix; skip the malformed descriptor.
                continue
            if hwtype == "modalias":
                modaliases.append(value)
            elif hwtype == "pci":
                pci.append(value)
            elif hwtype == "usb":
                usb.append(value)
        return ForHardware(modaliases, pci, usb)

    @property
    def script_type_name(self):
        """Human-readable label for `script_type`."""
        for script_type, script_type_name in SCRIPT_TYPE_CHOICES:
            if self.script_type == script_type:
                return script_type_name
        return 'unknown'

    @property
    def hardware_type_name(self):
        """Human-readable label for `hardware_type`.

        NOTE(review): indexes the choices list by the stored value, which
        assumes choice values equal their list positions -- confirm against
        HARDWARE_TYPE_CHOICES.
        """
        return HARDWARE_TYPE_CHOICES[self.hardware_type][1]

    @property
    def parallel_name(self):
        """Human-readable label for `parallel` (same indexing assumption)."""
        return SCRIPT_PARALLEL_CHOICES[self.parallel][1]

    def __str__(self):
        return self.name

    def add_tag(self, tag):
        """Add `tag` to this script's tags, ignoring duplicates."""
        if tag not in self.tags:
            self.tags.append(tag)

    def remove_tag(self, tag):
        """Remove `tag` from this script's tags if present."""
        if tag in self.tags:
            self.tags.remove(tag)

    def save(self, *args, **kwargs):
        """Save, keeping the derived tags in sync with the model fields."""
        # The 'destructive' tag mirrors the boolean field.
        if self.destructive:
            self.add_tag('destructive')
        else:
            self.remove_tag('destructive')

        # Keep exactly one hardware-type tag, matching `hardware_type`.
        for hw_type, hw_type_label in HARDWARE_TYPE_CHOICES:
            if hw_type == self.hardware_type:
                self.add_tag(hw_type_label.lower())
            else:
                self.remove_tag(hw_type_label.lower())

        return super().save(*args, **kwargs)
Esempio n. 13
0
class Notification(CleanSave, TimestampedModel):
    """A notification message.

    :ivar ident: Unique identifier for the notification. Not required but is
        used to make sure messages of the same type are not posted multiple
        times.

    :ivar user: Specific user who can see the message.
    :ivar users: If true, this message can be seen by all ordinary users.
    :ivar admins: If true, this message can be seen by all administrators.

    :ivar message: Message that is viewable by the user, used as a
        format-style template together with `context`.
    :ivar context: A JSON-serialisable dict whose values are interpolated
        into `message`.
    :ivar category: One of "success", "warning", "error" or "info",
        describing the tone of the notification.
    :ivar dismissable: Whether the notification can be dismissed.
    """
    class Meta(DefaultMeta):
        """Needed for South to recognize this model."""

    objects = NotificationManager()

    # The ident column *is* unique, but uniqueness is enforced by a partial
    # index created directly in SQL (see migrations); such an index cannot
    # be expressed with Django.
    ident = CharField(max_length=40, null=True, blank=True, default=None)

    user = ForeignKey(
        User, null=True, blank=True, default=None, on_delete=CASCADE)
    users = BooleanField(null=False, blank=True, default=False)
    admins = BooleanField(null=False, blank=True, default=False)

    message = TextField(null=False, blank=False)
    context = JSONObjectField(null=False, blank=True, default=dict)
    category = CharField(
        null=False, blank=True, default="info", max_length=10,
        choices=[
            ("error", "Error"),
            ("warning", "Warning"),
            ("success", "Success"),
            ("info", "Informational"),
        ])
    dismissable = BooleanField(null=False, blank=True, default=True)

    def render(self):
        """Render this notification's message using its context.

        The message may contain HTML markup; values interpolated from the
        context are escaped.
        """
        template = Markup(self.message)
        return str(template.format(**self.context))

    def is_relevant_to(self, user):
        """Is this notification relevant to the given user?"""
        if user is None:
            return False
        if self.user_id is not None and self.user_id == user.id:
            return True
        if user.is_superuser:
            return self.admins
        return self.users

    def dismiss(self, user):
        """Dismiss this notification.

        :param user: The user dismissing this notification.
        :raises NotificationNotDismissable: if this notification does not
            allow dismissal.
        """
        if not self.dismissable:
            raise NotificationNotDismissable()
        NotificationDismissal.objects.get_or_create(
            notification=self, user=user)

    def clean(self):
        super().clean()
        # Normalise empty strings that Django's blank-handling lets through:
        # a blank ident means "no ident" and a blank category means "info".
        if self.ident == "":
            self.ident = None
        if self.category == "":
            self.category = "info"
        # The context must be an actual dict (not merely dict-like) because
        # it will be converted to JSON later.
        if not isinstance(self.context, dict):
            raise ValidationError({"context": "Context is not a mapping."})
        # Finally, prove that the message template and the context actually
        # combine into a renderable notification.
        try:
            self.render()
        except Exception:
            raise ValidationError("Notification cannot be rendered.")

    def __repr__(self):
        username = "******" if self.user is None else repr(self.user.username)
        return (
            "<Notification %s user=%s users=%r admins=%r %r>" % (
                self.category.upper(), username, self.users, self.admins,
                self.render()))
Esempio n. 14
0
class JSONFieldModel(Model):
    """Minimal model pairing a name with an arbitrary JSON-stored value."""

    name = CharField(max_length=255, unique=False)
    value = JSONObjectField(null=True)
Esempio n. 15
0
class ScriptResult(CleanSave, TimestampedModel):
    """The result of running a script on a node.

    Holds the captured output streams, the exit status and the derived MAAS
    status for one script execution belonging to a `ScriptSet`.
    """

    # Force model into the metadataserver namespace.
    class Meta(DefaultMeta):
        pass

    # The run (commissioning/testing/installation set) this result is part of.
    script_set = ForeignKey(ScriptSet, editable=False, on_delete=CASCADE)

    # All ScriptResults except commissioning scripts will be linked to a Script
    # as commissioning scripts are still embedded in the MAAS source.
    script = ForeignKey(
        Script, editable=False, blank=True, null=True, on_delete=SET_NULL)

    # The exact version of the script text that was run; SET_NULL keeps the
    # result around even if that version is later deleted.
    script_version = ForeignKey(
        VersionedTextFile, blank=True, null=True, editable=False,
        on_delete=SET_NULL)

    # MAAS-level status (pending, running, passed, failed, timed out, ...).
    status = IntegerField(
        choices=SCRIPT_STATUS_CHOICES, default=SCRIPT_STATUS.PENDING)

    # Process exit status, once the script has run.
    exit_status = IntegerField(blank=True, null=True)

    # Used by the builtin commissioning scripts and installation result. Also
    # stores the Script name in case the Script is deleted but the result
    # isn't.
    script_name = CharField(
        max_length=255, unique=False, editable=False, null=True)

    # Raw captured streams, each capped at 1 MiB.
    output = BinaryField(max_length=1024 * 1024, blank=True, default=b'')

    stdout = BinaryField(max_length=1024 * 1024, blank=True, default=b'')

    stderr = BinaryField(max_length=1024 * 1024, blank=True, default=b'')

    # If a result is given in the output convert it to JSON and store it here.
    result = JSONObjectField(blank=True, default='')

    # When the script started to run.
    started = DateTimeField(editable=False, null=True, blank=True)

    # When the script finished running.
    ended = DateTimeField(editable=False, null=True, blank=True)

    @property
    def name(self):
        """Script name, falling back to the stored name or "Unknown"."""
        if self.script is not None:
            return self.script.name
        elif self.script_name is not None:
            return self.script_name
        else:
            return "Unknown"

    @property
    def status_name(self):
        """Human-readable label for `status`.

        NOTE(review): indexes the choices list by the stored value, which
        assumes choice values equal their list positions -- confirm against
        SCRIPT_STATUS_CHOICES.
        """
        return SCRIPT_STATUS_CHOICES[self.status][1]

    @property
    def runtime(self):
        """Wall-clock runtime as a string, or '' when not yet known."""
        if None not in (self.ended, self.started):
            runtime = self.ended - self.started
            # Drop sub-second precision for display.
            return str(runtime - timedelta(microseconds=runtime.microseconds))
        else:
            return ''

    def __str__(self):
        return "%s/%s" % (self.script_set.node.system_id, self.name)

    def store_result(
            self, exit_status=None, output=None, stdout=None, stderr=None,
            result=None, script_version_id=None, timedout=False):
        """Record the outcome of a script run on this result and save it.

        :param exit_status: Process exit status; 0 marks the result PASSED,
            any other value FAILED.
        :param output: Combined output bytes.
        :param stdout: Captured stdout bytes.
        :param stderr: Captured stderr bytes.
        :param result: Structured result data, if any.
        :param script_version_id: Id of the script version that was run;
            when omitted the script's latest version is assumed.
        :param timedout: When True the result is marked TIMEDOUT and
            `exit_status` is ignored.
        """
        # Don't allow ScriptResults to be overwritten unless the node is a
        # controller. Controllers are allowed to overwrite their results to
        # prevent new ScriptSets being created every time a controller starts.
        # This also allows us to avoid creating an RPC call for the rack
        # controller to create a new ScriptSet.
        if not self.script_set.node.is_controller:
            # Allow both PENDING and RUNNING scripts in case the node didn't
            # inform MAAS the Script was being run, it just uploaded results.
            assert self.status in (
                SCRIPT_STATUS.PENDING, SCRIPT_STATUS.RUNNING)
            assert self.output == b''
            assert self.stdout == b''
            assert self.stderr == b''
            assert self.result == ''
            assert self.script_version is None

        if timedout:
            self.status = SCRIPT_STATUS.TIMEDOUT
        elif exit_status is not None:
            self.exit_status = exit_status
            if exit_status == 0:
                self.status = SCRIPT_STATUS.PASSED
            else:
                self.status = SCRIPT_STATUS.FAILED
        if output is not None:
            self.output = Bin(output)
        if stdout is not None:
            self.stdout = Bin(stdout)
        if stderr is not None:
            self.stderr = Bin(stderr)
        if result is not None:
            self.result = result
        if self.script:
            if script_version_id is not None:
                # Resolve the version id against the script's history; an
                # unknown id is logged and recorded as a node event.
                for script in self.script.script.previous_versions():
                    if script.id == script_version_id:
                        self.script_version = script
                        break
                if self.script_version is None:
                    err_msg = (
                        "%s(%s) sent a script result for %s(%d) with an "
                        "unknown script version(%d)." % (
                            self.script_set.node.fqdn,
                            self.script_set.node.system_id,
                            self.script.name, self.script.id,
                            script_version_id))
                    logger.error(err_msg)
                    Event.objects.create_node_event(
                        system_id=self.script_set.node.system_id,
                        event_type=EVENT_TYPES.SCRIPT_RESULT_ERROR,
                        event_description=err_msg)
            else:
                # If no script version was given assume the latest version
                # was run.
                self.script_version = self.script.script

        # If commissioning result check if its a builtin script, if so run its
        # hook before committing to the database.
        if (self.script_set.result_type == RESULT_TYPE.COMMISSIONING and
                self.name in NODE_INFO_SCRIPTS):
            post_process_hook = NODE_INFO_SCRIPTS[self.name]['hook']
            post_process_hook(
                node=self.script_set.node, output=self.stdout,
                exit_status=self.exit_status)

        self.save()

    def save(self, *args, **kwargs):
        """Save, stamping `started`/`ended` on status transitions.

        Uses naive local time from `datetime.now()`.
        """
        if self.started is None and self.status == SCRIPT_STATUS.RUNNING:
            self.started = datetime.now()
            if 'update_fields' in kwargs:
                kwargs['update_fields'].append('started')
        elif self.ended is None and self.status in {
                SCRIPT_STATUS.PASSED, SCRIPT_STATUS.FAILED,
                SCRIPT_STATUS.TIMEDOUT, SCRIPT_STATUS.ABORTED}:
            self.ended = datetime.now()
            if 'update_fields' in kwargs:
                kwargs['update_fields'].append('ended')

        return super().save(*args, **kwargs)
Esempio n. 16
0
class BMC(CleanSave, TimestampedModel):
    """A `BMC` represents an existing 'baseboard management controller'.  For
    practical purposes in MAAS, this is any addressable device that can control
    the power state of Nodes. The BMC associated with a Node is the one
    expected to control its power.

    Power parameters that apply to all nodes controlled by a BMC are stored
    here in the BMC. Those that are specific to different Nodes on the same BMC
    are stored in the Node model instances.

    :ivar ip_address: This `BMC`'s IP Address.
    :ivar power_type: The power type defines which type of BMC this is.
        Its value must match a power driver class name.
    :ivar power_parameters: Some JSON containing arbitrary parameters this
        BMC's power driver requires to function.
    :ivar objects: The :class:`BMCManager`.
    """
    class Meta(DefaultMeta):
        unique_together = ("power_type", "power_parameters", "ip_address")

    objects = Manager()

    bmcs = BMCManager()

    bmc_type = IntegerField(choices=BMC_TYPE_CHOICES,
                            editable=False,
                            default=BMC_TYPE.DEFAULT)

    ip_address = ForeignKey(StaticIPAddress,
                            default=None,
                            blank=True,
                            null=True,
                            editable=False,
                            on_delete=SET_NULL)

    # The possible choices for this field depend on the power types advertised
    # by the rack controllers.  This needs to be populated on the fly, in
    # forms.py, each time the form to edit a node is instantiated.
    power_type = CharField(max_length=10, null=False, blank=True, default='')

    # JSON-encoded set of parameters for power control, limited to 32kiB when
    # encoded as JSON. These apply to all Nodes controlled by this BMC.
    power_parameters = JSONObjectField(max_length=(2**15),
                                       blank=True,
                                       default='')

    # Rack controllers that have access to the BMC by routing instead of
    # having direct layer 2 access.
    routable_rack_controllers = ManyToManyField(
        "RackController",
        blank=True,
        editable=True,
        through="BMCRoutableRackControllerRelationship",
        related_name="routable_bmcs")

    # Values for Pod's.
    #  1. Name of the Pod.
    #  2. List of architectures that a Pod supports.
    #  3. Capabilities that the Pod supports.
    #  4. Total cores in the Pod.
    #  5. Fastest CPU speed in the Pod.
    #  6. Total amount of memory in the Pod.
    #  7. Total amount in bytes of local storage available in the Pod.
    #  8. Total number of available local disks in the Pod.
    #  9. The resource pool machines in the pod should belong to by default.
    name = CharField(max_length=255, default='', blank=True, unique=True)
    architectures = ArrayField(TextField(),
                               blank=True,
                               null=True,
                               default=list)
    capabilities = ArrayField(TextField(), blank=True, null=True, default=list)
    cores = IntegerField(blank=False, null=False, default=0)
    cpu_speed = IntegerField(blank=False, null=False, default=0)  # MHz
    memory = IntegerField(blank=False, null=False, default=0)
    local_storage = BigIntegerField(  # Bytes
        blank=False, null=False, default=0)
    local_disks = IntegerField(blank=False, null=False, default=-1)
    iscsi_storage = BigIntegerField(  # Bytes
        blank=False, null=False, default=-1)
    default_pool = ForeignKey(ResourcePool,
                              default=None,
                              null=True,
                              blank=True,
                              editable=True,
                              on_delete=PROTECT)

    def __str__(self):
        return "%s (%s)" % (self.id,
                            self.ip_address if self.ip_address else "No IP")

    def _as(self, model):
        """Create a `model` that shares underlying storage with `self`.

        In other words, the newly returned object will be an instance of
        `model` and its `__dict__` will be `self.__dict__`. Not a copy, but a
        reference to, so that changes to one will be reflected in the other.
        """
        new = object.__new__(model)
        new.__dict__ = self.__dict__
        return new

    def as_bmc(self):
        """Return a reference to self that behaves as a `BMC`."""
        return self._as(BMC)

    def as_pod(self):
        """Return a reference to self that behaves as a `Pod`."""
        return self._as(Pod)

    # Dispatch table from bmc_type to the matching view method.
    _as_self = {
        BMC_TYPE.BMC: as_bmc,
        BMC_TYPE.POD: as_pod,
    }

    def as_self(self):
        """Return a reference to self that behaves as its own type."""
        return self._as_self[self.bmc_type](self)

    def delete(self):
        """Delete this BMC."""
        maaslog.info("%s: Deleting BMC", self)
        super().delete()

    def save(self, *args, **kwargs):
        """Save this BMC."""
        super().save(*args, **kwargs)
        # We let name be blank for the initial save, but fix it before the
        # save completes.  This is because set_random_name() operates by
        # trying to re-save the BMC with a random hostname, and retrying until
        # there is no conflict.
        if self.name == '':
            self.set_random_name()

    def set_random_name(self):
        """Set a random `name`, retrying until it does not collide."""
        while True:
            self.name = petname.Generate(2, "-")
            try:
                self.save()
            except ValidationError:
                # Name collision (name is unique); try another one.
                pass
            else:
                break

    def clean(self):
        """Update our ip_address if the address extracted from our power
        parameters has changed."""
        new_ip = BMC.extract_ip_address(self.power_type, self.power_parameters)
        current_ip = None if self.ip_address is None else self.ip_address.ip
        # Set the ip_address field.  If we have a bracketed address, assume
        # it's IPv6, and strip the brackets.
        if new_ip and new_ip.startswith('[') and new_ip.endswith(']'):
            new_ip = new_ip[1:-1]
        if new_ip != current_ip:
            if new_ip is None:
                self.ip_address = None
            else:
                # Update or create a StaticIPAddress for the new IP.
                try:
                    # This atomic block ensures that an exception within will
                    # roll back only this block's DB changes. This allows us to
                    # swallow exceptions in here and keep all changes made
                    # before or after this block is executed.
                    with transaction.atomic():
                        subnet = Subnet.objects.get_best_subnet_for_ip(new_ip)
                        (self.ip_address,
                         _) = StaticIPAddress.objects.get_or_create(
                             ip=new_ip,
                             defaults={
                                 'alloc_type': IPADDRESS_TYPE.STICKY,
                                 'subnet': subnet,
                             })
                except Exception as error:
                    # Deliberate best-effort: keep the old/empty ip_address
                    # and record why the extracted one could not be saved.
                    maaslog.info(
                        "BMC could not save extracted IP "
                        "address '%s': '%s'", new_ip, error)

    @staticmethod
    def scope_power_parameters(power_type, power_params):
        """Separate the global, bmc related power_parameters from the local,
        node-specific ones.

        :return: Tuple of (bmc_params, node_params).
        """
        if not power_type:
            # If there is no power type, treat all params as node params.
            return ({}, power_params)
        power_driver = PowerDriverRegistry.get_item(power_type)
        if power_driver is None:
            # If there is no power driver, treat all params as node params.
            return ({}, power_params)
        power_fields = power_driver.settings
        if not power_fields:
            # If there is no parameter info, treat all params as node params.
            return ({}, power_params)
        bmc_params = {}
        node_params = {}
        for param_name in power_params:
            power_field = power_driver.get_setting(param_name)
            if (power_field and power_field.get('scope') == SETTING_SCOPE.BMC):
                bmc_params[param_name] = power_params[param_name]
            else:
                node_params[param_name] = power_params[param_name]
        return (bmc_params, node_params)

    @staticmethod
    def extract_ip_address(power_type, power_parameters):
        """Extract the ip_address from the power_parameters. If there is no
        power_type, no power_parameters, or no valid value provided in the
        power_address field, returns None."""
        if not power_type or not power_parameters:
            # Nothing to extract.
            return None
        power_driver = PowerDriverRegistry.get_item(power_type)
        if power_driver is None:
            maaslog.warning("No power driver for power type %s" % power_type)
            return None
        power_type_parameters = power_driver.settings
        if not power_type_parameters:
            maaslog.warning("No power driver settings for power type %s" %
                            power_type)
            return None
        ip_extractor = power_driver.ip_extractor
        if not ip_extractor:
            maaslog.info("No IP extractor configured for power type %s. "
                         "IP will not be extracted." % power_type)
            return None
        field_value = power_parameters.get(ip_extractor.get('field_name'))
        if not field_value:
            maaslog.warning("IP extractor field_value missing for %s" %
                            power_type)
            return None
        extraction_pattern = ip_extractor.get('pattern')
        if not extraction_pattern:
            maaslog.warning("IP extractor extraction_pattern missing for %s" %
                            power_type)
            return None
        match = re.match(extraction_pattern, field_value)
        if match:
            return match.group('address')
        # No match found - return None.
        return None

    def get_layer2_usable_rack_controllers(self, with_connection=True):
        """Return a list of `RackController`'s that have the ability to access
        this `BMC` directly through a layer 2 connection."""
        ip_address = self.ip_address
        if ip_address is None or ip_address.ip is None or ip_address.ip == '':
            return set()

        # The BMC has a valid StaticIPAddress set. Make sure that the subnet
        # is correct for that BMC.
        subnet = Subnet.objects.get_best_subnet_for_ip(ip_address.ip)
        if subnet is not None and self.ip_address.subnet_id != subnet.id:
            self.ip_address.subnet = subnet
            self.ip_address.save()

        # Circular imports.
        from maasserver.models.node import RackController
        return RackController.objects.filter_by_url_accessible(
            ip_address.ip, with_connection=with_connection)

    def get_routable_usable_rack_controllers(self, with_connection=True):
        """Return a list of `RackController`'s that have the ability to access
        this `BMC` through a route on the rack controller."""
        routable_racks = [
            relationship.rack_controller
            for relationship in (self.routable_rack_relationships.all(
            ).select_related("rack_controller")) if relationship.routable
        ]
        if with_connection:
            conn_rack_ids = [client.ident for client in getAllClients()]
            return [
                rack for rack in routable_racks
                if rack.system_id in conn_rack_ids
            ]
        else:
            return routable_racks

    def get_usable_rack_controllers(self, with_connection=True):
        """Return a list of `RackController`'s that have the ability to access
        this `BMC` either using layer2 or routable if no layer2 are available.
        """
        racks = self.get_layer2_usable_rack_controllers(
            with_connection=with_connection)
        if len(racks) == 0:
            # No layer2 routable rack controllers. Use routable rack
            # controllers.
            racks = self.get_routable_usable_rack_controllers(
                with_connection=with_connection)
        return racks

    def get_client_identifiers(self):
        """Return a list of identifiers that can be used to get the
        `rpc.common.Client` for this `BMC`.

        :raise NoBMCAccessError: Raised when no rack controllers have access
            to this `BMC`.
        """
        rack_controllers = self.get_usable_rack_controllers()
        return [controller.system_id for controller in rack_controllers]

    def is_accessible(self):
        """If the BMC is accessible by at least one rack controller."""
        racks = self.get_usable_rack_controllers(with_connection=False)
        return len(racks) > 0

    def update_routable_racks(self, routable_racks_ids,
                              non_routable_racks_ids):
        """Set the `routable_rack_controllers` relationship to the new
        information."""
        BMCRoutableRackControllerRelationship.objects.filter(
            bmc=self.as_bmc()).delete()
        self._create_racks_relationship(routable_racks_ids, True)
        self._create_racks_relationship(non_routable_racks_ids, False)

    def _create_racks_relationship(self, rack_ids, routable):
        """Create `BMCRoutableRackControllerRelationship` for list of
        `rack_ids` and whether they are `routable`."""
        # Circular imports.
        from maasserver.models.node import RackController
        for rack_id in rack_ids:
            try:
                rack = RackController.objects.get(system_id=rack_id)
            except RackController.DoesNotExist:
                # Possibly deleted before this call, but very very rare. Skip
                # this id entirely: falling through would use `rack` unbound
                # (first iteration) or stale from a previous iteration.
                continue
            BMCRoutableRackControllerRelationship(bmc=self,
                                                  rack_controller=rack,
                                                  routable=routable).save()
Esempio n. 17
0
class Script(CleanSave, TimestampedModel):
    """A script with its metadata: type, target hardware, tags, parameters,
    packages, timeout, and the versioned script text itself."""

    # Force model into the metadataserver namespace.
    class Meta(DefaultMeta):
        pass

    objects = ScriptManager()

    # Unique identifier for the script.
    name = CharField(max_length=255, unique=True)

    title = CharField(max_length=255, blank=True)

    description = TextField(blank=True)

    # Free-form tags; save() keeps the 'destructive' tag and the hardware
    # type label in sync with the corresponding fields.
    tags = ArrayField(TextField(), blank=True, null=True, default=list)

    script_type = IntegerField(choices=SCRIPT_TYPE_CHOICES,
                               default=SCRIPT_TYPE.TESTING)

    # The hardware the script configures or tests.
    hardware_type = IntegerField(choices=HARDWARE_TYPE_CHOICES,
                                 default=HARDWARE_TYPE.NODE)

    # Whether the script can run in parallel with other scripts.
    parallel = IntegerField(choices=SCRIPT_PARALLEL_CHOICES,
                            default=SCRIPT_PARALLEL.DISABLED)

    # Any results which will be made available after the script is run.
    # Use the callable `dict` (not a `{}` literal) so every instance gets
    # its own fresh dictionary instead of sharing one mutable default.
    results = JSONObjectField(blank=True, default=dict)

    # Parameters which may be passed to the script and their constraints.
    parameters = JSONObjectField(blank=True, default=dict)

    # apt, snap, dpkg, to install or archives to extract.
    packages = JSONObjectField(blank=True, default=dict)

    # 0 is no timeout
    timeout = DurationField(default=datetime.timedelta())

    destructive = BooleanField(default=False)

    # True only if the script is shipped with MAAS
    default = BooleanField(default=False)

    script = OneToOneField(VersionedTextFile, on_delete=CASCADE)

    @property
    def script_type_name(self):
        """Return the display label for `script_type`, or 'unknown'."""
        for script_type, script_type_name in SCRIPT_TYPE_CHOICES:
            if self.script_type == script_type:
                return script_type_name
        return 'unknown'

    @property
    def hardware_type_name(self):
        """Return the display label for `hardware_type`, or 'unknown'."""
        # Search the choices by value instead of indexing by position;
        # indexing only works while the choice values happen to equal
        # their list positions and raises IndexError otherwise.
        for hardware_type, hardware_type_name in HARDWARE_TYPE_CHOICES:
            if self.hardware_type == hardware_type:
                return hardware_type_name
        return 'unknown'

    @property
    def parallel_name(self):
        """Return the display label for `parallel`, or 'unknown'."""
        for parallel, parallel_name in SCRIPT_PARALLEL_CHOICES:
            if self.parallel == parallel:
                return parallel_name
        return 'unknown'

    def __str__(self):
        return self.name

    def add_tag(self, tag):
        """Add tag to Script."""
        # `tags` is null=True, so guard against None before membership test.
        if self.tags is None:
            self.tags = []
        if tag not in self.tags:
            self.tags.append(tag)

    def remove_tag(self, tag):
        """Remove tag from Script."""
        if self.tags and tag in self.tags:
            self.tags.remove(tag)

    def save(self, *args, **kwargs):
        # Keep the 'destructive' tag in sync with the destructive flag.
        if self.destructive:
            self.add_tag('destructive')
        else:
            self.remove_tag('destructive')

        # Keep exactly one hardware type label tag: the current one.
        for hw_type, hw_type_label in HARDWARE_TYPE_CHOICES:
            if hw_type == self.hardware_type:
                self.add_tag(hw_type_label.lower())
            else:
                self.remove_tag(hw_type_label.lower())

        return super().save(*args, **kwargs)