def get_steps(self):
    """Return the single step needed to unmount this client's filesystem.

    Looks up the host and filesystem backing ``self.lustre_client_mount``
    from the ObjectCache and hands them to UnmountLustreFilesystemsStep.
    """
    from chroma_core.models.filesystem import ManagedFilesystem

    mount = self.lustre_client_mount
    host = ObjectCache.get_one(ManagedHost, lambda mh: mh.id == mount.host_id)
    fs = ObjectCache.get_one(ManagedFilesystem, lambda mf: mf.id == mount.filesystem_id)

    step_args = {
        "host": host,
        "filesystems": [(fs.mount_path(), mount.mountpoint)],
    }
    return [(UnmountLustreFilesystemsStep, step_args)]
Example #2
0
    def get_steps(self):
        """Return the unmount step for this client mount.

        Resolves the host by id and the filesystem by name from the
        ObjectCache, then builds the argument dict consumed by
        UnmountLustreFilesystemsStep.
        """
        mount = self.lustre_client_mount

        host = ObjectCache.get_one(ManagedHost, lambda mh: mh.id == mount.host_id)
        fs = ObjectCache.get_one(ManagedFilesystem, lambda mf: mf.name == mount.filesystem)

        step_args = {
            "host": host,
            "filesystems": [(fs.mount_path(), mount.mountpoints)],
        }
        return [(UnmountLustreFilesystemsStep, step_args)]
Example #3
0
    def get_steps(self):
        """Return the mount step for this client mount.

        Uses the first recorded mountpoint if any exist, otherwise falls
        back to a default path of /mnt/<filesystem name>.
        """
        host = ObjectCache.get_one(
            ManagedHost, lambda mh: mh.id == self.lustre_client_mount.host_id
        )

        # Prefer an explicitly recorded mountpoint; synthesize one otherwise.
        if self.lustre_client_mount.mountpoints:
            mountpoint = self.lustre_client_mount.mountpoints[0]
        else:
            mountpoint = "/mnt/{}".format(self.lustre_client_mount.filesystem)

        fs = ObjectCache.get_one(
            ManagedFilesystem,
            lambda mf: mf.name == self.lustre_client_mount.filesystem,
        )

        step_args = dict(host=host, filesystems=[(fs.mount_path(), mountpoint)])
        return [(MountLustreFilesystemsStep, step_args)]
    def get_deps(self, state=None):
        """Dependencies tying this copytool's lifecycle to its client mount.

        A running copytool requires the mount to be mounted; removal of the
        client mount forces removal of the copytool.
        """
        state = state or self.state

        mount = ObjectCache.get_one(
            LustreClientMount, lambda cm: cm.id == self.client_mount_id
        )

        deps = []

        if state == "started":
            # Stay running only while the client mount is mounted; fall back
            # to "stopped" if it isn't.
            deps.append(DependOn(mount, "mounted", fix_state="stopped"))

        if state != "removed":
            # Removing the client mount must drag the copytool to "removed".
            removal_dep = DependOn(
                mount,
                "mounted",
                acceptable_states=list(set(self.client_mount.states) - set(["removed"])),
                fix_state="removed",
            )
            deps.append(removal_dep)

        return DependAll(deps)
 def get_deps(self):
     """Depend on LNet being up on the client mount's host."""
     host = ObjectCache.get_one(
         ManagedHost,
         lambda mh: mh.id == self.lustre_client_mount.host_id,
     )
     return DependOn(host.lnet_configuration, "lnet_up")
    def get_steps(self):
        """Return the steps needed to purge this filesystem from its MGT.

        Returns an empty list when there is nothing to purge (MGT never got
        past formatting, or the filesystem is immutable); raises RuntimeError
        if the MGS is not running, since purging requires a live MGS.
        """
        mgs_target = ObjectCache.get_one(
            ManagedTarget, lambda t: t.id == self.filesystem.mgs_id
        )

        # Nothing to purge if the MGT never made it past being formatted
        # (a filesystem created and torn down before its MGT got going).
        if mgs_target.state in ["unformatted", "formatted"]:
            return []

        # Immutable filesystems are never purged. (Although how this gets
        # called in that case is beyond me.)
        if self.filesystem.immutable_state:
            return []

        # Purging talks to the MGS, so it must be mounted somewhere.
        if not mgs_target.active_mount:
            raise RuntimeError(
                "MGT needs to be running in order to remove the filesystem.")

        volume_node = mgs_target.active_mount.volume_node
        device_type = (
            volume_node.volume.storage_resource.to_resource_class().device_type()
        )

        purge_args = {
            "filesystem": self.filesystem,
            "mgs_device_path": volume_node.path,
            "mgs_device_type": device_type,
            "host": mgs_target.active_mount.host,
        }
        return [(PurgeFilesystemStep, purge_args)]
    def get_deps(self, state=None):
        """Dependencies for this target's state transitions.

        While mounted, the target depends on LNet (and Pacemaker, when
        configured) on the host of its active mount. Unless it is being
        removed/forgotten, it also depends on LNet/Pacemaker on every host
        that carries one of its target mounts.
        """
        from chroma_core.models import LNetConfiguration

        state = state or self.state

        target_record = get_target_by_name(self.name)
        active_host_id = target_record["active_host_id"]

        deps = []

        if state == "mounted" and active_host_id and not self.immutable_state:
            # The active mount's host must keep LNet up; if LNet stops
            # there, this target is stopped first.
            active_host = ObjectCache.get_one(
                ManagedHost, lambda mh: mh.id == active_host_id, fill_on_miss=True
            )

            lnet = ObjectCache.get_by_id(
                LNetConfiguration, active_host.lnet_configuration.id
            )
            deps.append(DependOn(lnet, "lnet_up", fix_state="unmounted"))

            if active_host.pacemaker_configuration:
                pacemaker = ObjectCache.get_by_id(
                    PacemakerConfiguration, active_host.pacemaker_configuration.id
                )
                deps.append(DependOn(pacemaker, "started", fix_state="unmounted"))

            # TODO: also express that this situation may be resolved by
            # migrating the target instead of stopping it.

        if state not in ["removed", "forgotten"]:
            # Immutable targets get "forgotten" rather than "removed".
            fix_state = "forgotten" if self.immutable_state else "removed"

            for host in self.hosts:
                lnet = ObjectCache.get_by_id(
                    LNetConfiguration, host.lnet_configuration.id
                )
                deps.append(
                    DependOn(
                        lnet,
                        "lnet_up",
                        unacceptable_states=["unconfigured"],
                        fix_state=fix_state,
                    )
                )

                if host.pacemaker_configuration:
                    pacemaker = ObjectCache.get_by_id(
                        PacemakerConfiguration, host.pacemaker_configuration.id
                    )
                    deps.append(
                        DependOn(
                            pacemaker,
                            "started",
                            unacceptable_states=["unconfigured"],
                            fix_state=fix_state,
                        )
                    )

        return DependAll(deps)
Example #8
0
    def get_deps(self):
        """Depend on the MGS being mounted, once it exists on disk.

        A MGT still in "unformatted"/"formatted" can't be started, so no
        dependency is recorded in that case.
        """
        mgs_target = ObjectCache.get_one(
            ManagedTarget, lambda t: t.id == self.filesystem.mgs_id
        )

        deps = []
        if mgs_target.state not in ["unformatted", "formatted"]:
            deps.append(DependOn(mgs_target, "mounted", fix_state="unavailable"))

        return DependAll(deps)
    def get_deps(self):
        """Dependencies for mounting this target.

        A ticketed MGS/MDT/OST depends solely on its ticket being granted.
        Otherwise, at least one of the target's hosts must have LNet up
        (and Pacemaker started where configured) — hence DependAny.
        """
        # MGS with a ticket: the ticket is the only dependency.
        if issubclass(self.target.downcast_class, ManagedMgs):
            ticket = self.target.downcast().get_ticket()
            if ticket:
                return DependAll(DependOn(ticket, "granted", fix_state="unmounted"))

        # MDT/OST under a filesystem ticket: same story.
        if self.target.downcast_class in [ManagedMdt, ManagedOst]:
            from chroma_core.models import FilesystemTicket

            concrete = self.target.downcast()
            fs_ticket = FilesystemTicket.objects.filter(
                filesystem=concrete.filesystem_id
            ).first()
            if fs_ticket:
                return DependAll(
                    DependOn(fs_ticket.ticket, "granted", fix_state="unmounted")
                )

        from chroma_core.models import LNetConfiguration

        deps = []
        # At least one target mount's host needs LNet up.
        for host in self.target.hosts:
            lnet = ObjectCache.get_one(
                LNetConfiguration, lambda lc: lc.host_id == host.id
            )
            deps.append(DependOn(lnet, "lnet_up", fix_state="unmounted"))

            try:
                pacemaker = ObjectCache.get_one(
                    PacemakerConfiguration, lambda pc: pc.host_id == host.id
                )
                deps.append(DependOn(pacemaker, "started", fix_state="unmounted"))
            except PacemakerConfiguration.DoesNotExist:
                # Hosts without Pacemaker simply contribute no such dep.
                pass

        return DependAny(deps)
    def get_deps(self):
        """Require the client mount to be unmounted, but only when this is
        the sole copytool on the host; otherwise no dependency at all."""
        host_copytools = ObjectCache.get(
            Copytool, lambda ct: ct.host == self.copytool.host
        )

        # Other copytools still use the mount — don't force an unmount.
        if len(host_copytools) != 1:
            return DependAll()

        mount = ObjectCache.get_one(
            LustreClientMount, lambda cm: cm.id == self.copytool.client_mount_id
        )
        return DependOn(mount, "unmounted")
Example #11
0
    def get_steps(self):
        """Return one step that unmounts every mounted client mount on host.

        For each mounted LustreClientMount on ``self.host``, pairs the
        filesystem's mount path with the mount's recorded mountpoints.
        """
        mounted = ObjectCache.get(
            LustreClientMount,
            lambda cm: (cm.host == self.host and cm.state == "mounted"),
        )

        filesystems = []
        for mount in mounted:
            # Lambda is invoked immediately by get_one, so capturing the
            # loop variable here is safe.
            fs = ObjectCache.get_one(
                ManagedFilesystem, lambda mf: mf.name == mount.filesystem
            )
            filesystems.append((fs.mount_path(), mount.mountpoints))

        step_args = {"host": self.host, "filesystems": filesystems}
        return [(UnmountLustreFilesystemsStep, step_args)]
Example #12
0
    def get_deps(self, state=None):
        """Dependencies for this client mount's state transitions.

        Mounted state requires LNet up on the host. Unless being removed,
        the mount also follows its filesystem's and host's removal.
        """
        state = state or self.state

        deps = []

        if state == "mounted":
            # If LNet stops on the host, unmount this filesystem first.
            deps.append(
                DependOn(
                    self.host.lnet_configuration, "lnet_up", fix_state="unmounted"
                )
            )

        if state != "removed":
            try:
                fs = ObjectCache.get_one(
                    ManagedFilesystem, lambda mf: mf.name == self.filesystem
                )
            except ManagedFilesystem.DoesNotExist:
                # Filesystem unknown to us: no fs-level dependencies.
                pass
            else:
                # The fs must be available for the mount to exist.
                deps.append(DependOn(fs, "available", fix_state="unmounted"))

                # If the filesystem is removed, the mount should follow.
                deps.append(
                    DependOn(
                        fs,
                        "available",
                        acceptable_states=list(
                            set(fs.states) - set(["removed", "forgotten"])
                        ),
                        fix_state="removed",
                    )
                )

            # If the host is removed, the mount should follow too.
            deps.append(
                DependOn(
                    self.host,
                    "lnet_up",
                    acceptable_states=list(
                        set(self.host.states) - set(["removed", "forgotten"])
                    ),
                    fix_state="removed",
                )
            )

        return DependAll(deps)
Example #13
0
    def get_deps(self, state=None):
        """Tie this filesystem's removal to its MGS.

        While not removed/forgotten, depend on the MGS being unmounted;
        if the MGS itself goes away, fix up to "forgotten" (immutable) or
        "removed".
        """
        state = state or self.state

        mgs = ObjectCache.get_one(ManagedTarget, lambda t: t.id == self.mgs_id)

        # Immutable filesystems are forgotten rather than removed.
        remove_state = "forgotten" if self.immutable_state else "removed"

        deps = []
        if state not in ["removed", "forgotten"]:
            deps.append(
                DependOn(
                    mgs,
                    "unmounted",
                    acceptable_states=mgs.not_states(["removed", "forgotten"]),
                    fix_state=remove_state,
                )
            )

        return DependAll(deps)