def test_managed_host_undeployed(self):
    """Test that an undeployed host can only be force removed"""
    self.host.state = "undeployed"
    self.host.save()
    ObjectCache.update(self.host)

    # The only job the transition engine should offer for this state.
    offered = {job["class_name"] for job in self._get_jobs(self.host)}
    self.assertEqual(offered, {"ForceRemoveHostJob"})
def learn_primary_target(self, managed_target):
    """Elect and persist the primary ManagedTargetMount for *managed_target*.

    Clears the primary flag on every mount of the target, then inspects the
    scan results in ``self.all_hosts_data`` to decide which mount is primary.

    :param managed_target: the ManagedTarget whose mounts are examined.
    :return: the elected ManagedTargetMount, or None if no candidate found.
    :raises NoNidsPresent: if a candidate host reports no LNet NIDs.
    """
    primary_target = None
    managed_target.managedtargetmount_set.update(primary=False)

    for tm in managed_target.managedtargetmount_set.all():
        # We may well have scanned a subset of the hosts and so not have data
        # for all the target mounts; if we are rescanning we can know about
        # targetmounts we didn't scan.
        if tm.host not in self.all_hosts_data:
            continue

        try:
            target_info = next(
                dev
                for dev in self.all_hosts_data[tm.host]["local_targets"]
                if dev["uuid"] == managed_target.uuid
            )
        except StopIteration:
            # LV not in all_hosts_data
            continue

        local_nids = set(tm.host.lnet_configuration.get_nids())
        if not local_nids:
            raise NoNidsPresent("Host %s has no NIDS!" % tm.host)

        if "failover.node" in target_info["params"]:
            failover_nids = set(
                normalize_nid(n)
                for nids in target_info["params"]["failover.node"]
                for n in nids.split(",")
            )

            # FIX: dropped the redundant bool() around the set intersection.
            if not (local_nids & failover_nids):
                # The current nids are not among the failover nids: this target
                # was created with 'mkfs.lustre --failnode', so this mount is
                # definitively the primary — no other possibility exists.
                primary_target = tm
                break
            elif target_info["mounted"]:
                # Created with 'mkfs.lustre --servicenodes': if it is mounted,
                # use this mount as primary until we find a better candidate.
                primary_target = tm
        else:
            # If there are no failover nids then this must be the primary.
            primary_target = tm
            break

    # FIX: identity comparison with None ('is not None') instead of '!= None'.
    if primary_target is not None:
        log.info("Target %s has been set to primary" % (primary_target))
        primary_target.primary = True
        primary_target.save()
        ObjectCache.update(primary_target)

    return primary_target
def toggle_fake_client_state(self):
    """Flip the simulated client mount between 'mounted' and 'unmounted'."""
    if self.mount.active:
        self.mount.state = 'unmounted'
    else:
        self.mount.state = 'mounted'
    self.mount.save()
    ObjectCache.update(self.mount)
def learn_target_mounts(self):
    """Discover ManagedTargetMount associations from scanned host data.

    Walks every target reported in ``self.all_hosts_data`` and, for each
    known ManagedTarget matched by UUID, records (or creates) the
    corresponding ManagedTargetMount, updating mount state and HA metadata.

    NOTE(review): indentation was reconstructed from a collapsed source line;
    the nesting of the two ``if local_info["mounted"]:`` branches should be
    confirmed against the original file.
    """
    for host, host_data in self.all_hosts_data.items():
        # We will compare any found target mounts to all known MGSs
        for local_info in host_data["local_targets"]:
            # Tuple used purely for log message formatting below.
            debug_id = (host, local_info["device_paths"][0], local_info["name"])

            targets = ManagedTarget.objects.filter(uuid=local_info["uuid"])
            if not targets.count():
                log.warning("Ignoring %s:%s (%s), target unknown" % debug_id)
                continue

            for target in targets:
                if isinstance(target, FilesystemMember):
                    # Filesystem members must be tied back to their MGS.
                    try:
                        mgs = self._target_find_mgs(host, local_info)
                    except ManagedMgs.DoesNotExist:
                        log.warning("Can't find MGS for target %s:%s (%s)" % debug_id)
                        continue
                else:
                    # MGS targets themselves have no parent MGS.
                    mgs = None

                if not self.target_available_here(host, mgs, local_info):
                    log.warning(
                        "Ignoring %s on %s, as it is not mountable on this host"
                        % (local_info["name"], host))
                    continue

                try:
                    log.info("Target %s seen on %s" % (target, host))
                    volumenode = self._get_volume_node(host, local_info["device_paths"])
                    (tm, created) = ManagedTargetMount.objects.get_or_create(
                        target=target, host=host, volume_node=volumenode)

                    if created:
                        if local_info["mounted"]:
                            tm.mount_point = local_info.get("mount_point")
                        tm.save()
                        log.info(
                            "Learned association %d between %s and host %s"
                            % (tm.id, local_info["name"], host))
                        self._learn_event(host, tm)
                        ObjectCache.add(ManagedTargetMount, tm)

                    if local_info["mounted"]:
                        # Mark the target as actively mounted through this mount.
                        target.state = "mounted"
                        target.active_mount = tm
                        label = local_info.get("ha_label")
                        if label:
                            # HA-managed target: record label and allow state
                            # transitions (immutable_state = False).
                            target.ha_label = label
                            target.immutable_state = False
                            self.ha_targets[local_info["uuid"]] = {
                                "mount": tm.mount_point,
                                "paths": local_info["device_paths"],
                            }
                        target.save()
                        ObjectCache.update(target)
                except NoNidsPresent:
                    log.warning(
                        "Cannot set up target %s on %s until LNet is running"
                        % (local_info["name"], host))