def test_get_enums(self):
    """`get_enums` finds the single enum, "ENUM", defined in this file."""
    source_file = getsourcefile(TestFunctions)
    found = get_enums(source_file)
    self.assertEqual(["ENUM"], [cls.__name__ for cls in found])
    (found_enum,) = found
    # The module was re-executed in a separate namespace, so the class we
    # got back is a distinct object from the ENUM in our own globals...
    self.assertIsNot(ENUM, found_enum)
    # ...but it carries exactly the same name -> value mapping.
    self.assertEqual(map_enum(ENUM), map_enum(found_enum))
def test_event_type_is_registered_on_first_call_only(self):
    """The event type is registered once, then cached for later calls."""
    protocol, connecting = self.patch_rpc_methods(side_effect=[{}, {}])
    self.addCleanup((yield connecting))
    ip_address = factory.make_ip_address()
    description = factory.make_name("description")
    event_name = random.choice(list(map_enum(EVENT_TYPES)))
    event_detail = EVENT_DETAILS[event_name]
    hub = NodeEventHub()

    # First call: the event type gets registered before the log is sent
    # to the region.
    yield hub.logByIP(event_name, ip_address, description)
    self.assertThat(
        protocol.RegisterEventType,
        MockCalledOnceWith(
            ANY,
            name=event_name,
            description=event_detail.description,
            level=event_detail.level,
        ),
    )
    self.assertThat(protocol.SendEventIPAddress, MockCalledOnce())

    # Forget the RPC calls recorded so far.
    protocol.RegisterEventType.reset_mock()
    protocol.SendEventIPAddress.reset_mock()

    # Second call: registration is known to have happened, so it is
    # skipped and the log goes to the region immediately.
    yield hub.logByIP(event_name, ip_address, description)
    self.assertThat(protocol.RegisterEventType, MockNotCalled())
    self.assertThat(protocol.SendEventIPAddress, MockCalledOnce())
def get_persistent_errors():
    """Return list of current persistent error messages."""
    # Persistent errors are stored as notifications keyed by component.
    component_idents = list(map_enum(COMPONENT).values())
    notifications = Notification.objects.filter(ident__in=component_idents)
    messages = [notification.render() for notification in notifications]
    messages.sort()
    return messages
def unregisterChannel(self, channel):
    """Unregister `channel`, issuing the matching UNLISTEN statements."""
    with closing(self.connection.cursor()) as cursor:
        if self.isSystemChannel(channel):
            # A system channel was registered with a single LISTEN, so a
            # single UNLISTEN undoes it.
            cursor.execute("UNLISTEN %s;" % channel)
        else:
            # An ordinary channel listens once per action; undo each one.
            actions = sorted(map_enum(ACTIONS).values())
            for action in actions:
                cursor.execute("UNLISTEN %s_%s;" % (channel, action))
def registerChannel(self, channel):
    """Register `channel`, issuing the matching LISTEN statements."""
    self.log.debug(f"Register Channel {channel}")
    with self._db_lock, self.connection.cursor() as cursor:
        if self.isSystemChannel(channel):
            # A system channel needs only a single LISTEN.
            statements = ["LISTEN %s;" % channel]
        else:
            # An ordinary channel listens once for every action.
            statements = [
                "LISTEN %s_%s;" % (channel, action)
                for action in sorted(map_enum(ACTIONS).values())
            ]
        for statement in statements:
            cursor.execute(statement)
def test__event_is_sent_to_region(self):
    """`logByIP` relays the event to the region over RPC."""
    protocol, connecting = self.patch_rpc_methods()
    self.addCleanup((yield connecting))
    ip_address = factory.make_ip_address()
    description = factory.make_name('description')
    event_name = random.choice(list(map_enum(EVENT_TYPES)))

    yield NodeEventHub().logByIP(event_name, ip_address, description)

    expected_call = MockCalledOnceWith(
        ANY,
        type_name=event_name,
        ip_address=ip_address,
        description=description,
    )
    self.assertThat(protocol.SendEventIPAddress, expected_call)
def convertChannel(self, channel):
    """Convert the postgres channel to a registered channel and action.

    :raise PostgresListenerNotifyError: When {channel} is not registered or
        {action} is not in `ACTIONS`.
    """
    # Notification channels are named "<channel>_<action>"; split at the
    # first underscore.
    base, action = channel.split("_", 1)
    if base not in self.listeners:
        raise PostgresListenerNotifyError(
            "%s is not a registered channel." % base)
    supported_actions = map_enum(ACTIONS).values()
    if action not in supported_actions:
        raise PostgresListenerNotifyError(
            "%s action is not supported." % action)
    return base, action
def test_map_enum_omits_private_or_special_methods(self):
    """Dunder and underscore-prefixed attributes are not enum values."""
    class Enum:
        VALUE = 9

        def __init__(self):
            pass

        def __repr__(self):
            return "Enum"

        def _save(self):
            pass

    # Only the plain public attribute survives the mapping.
    self.assertItemsEqual(["VALUE"], map_enum(Enum).keys())
def test__failure_is_suppressed_if_node_not_found(self):
    """A `NoSuchNode` failure from the region does not propagate."""
    protocol, connecting = self.patch_rpc_methods(
        side_effect=[fail(NoSuchNode())])
    self.addCleanup((yield connecting))
    ip_address = factory.make_ip_address()
    description = factory.make_name('description')
    event_name = random.choice(list(map_enum(EVENT_TYPES)))

    # Completes without raising even though the region reports that the
    # node is unknown.
    yield NodeEventHub().logByIP(event_name, ip_address, description)

    expected_call = MockCalledOnceWith(
        ANY,
        type_name=event_name,
        ip_address=ip_address,
        description=description,
    )
    self.assertThat(protocol.SendEventIPAddress, expected_call)
def test_event_is_sent_to_region(self):
    """`logByID` relays the event to the region over RPC."""
    protocol, connecting = self.patch_rpc_methods()
    self.addCleanup((yield connecting))
    system_id = factory.make_name("system_id")
    description = factory.make_name("description")
    event_name = random.choice(list(map_enum(EVENT_TYPES)))

    yield NodeEventHub().logByID(event_name, system_id, description)

    expected_call = MockCalledOnceWith(
        ANY,
        type_name=event_name,
        system_id=system_id,
        description=description,
    )
    self.assertThat(protocol.SendEvent, expected_call)
def test_updates_cache_if_event_type_not_found(self):
    """A `NoSuchEventType` failure evicts the type from the cache."""
    protocol, connecting = self.patch_rpc_methods(
        side_effect=[succeed({}), fail(NoSuchEventType())])
    self.addCleanup((yield connecting))
    ip_address = factory.make_ip_address()
    description = factory.make_name("description")
    event_name = random.choice(list(map_enum(EVENT_TYPES)))
    hub = NodeEventHub()

    # The first call succeeds and caches the event name.
    yield hub.logByIP(event_name, ip_address, description)
    self.assertThat(hub._types_registered, Equals({event_name}))

    # The second call crashes with NoSuchEventType...
    with ExpectedException(NoSuchEventType):
        yield hub.logByIP(event_name, ip_address, description)
    # ...and the event name has been dropped from the cache.
    self.assertThat(hub._types_registered, HasLength(0))
def test_every_event_has_details(self):
    """Every event type has an `EventDetail` entry, and nothing extra."""
    event_values = map_enum(EVENT_TYPES).values()
    self.assertItemsEqual(event_values, EVENT_DETAILS.keys())
    details = EVENT_DETAILS.values()
    self.assertThat(details, AllMatch(IsInstance(EventDetail)))
def get_random_component():
    """Pick one of the known component identifiers at random."""
    components = list(map_enum(COMPONENT).values())
    return random.choice(components)
def populate_main():
    """Populate the main data all in one transaction.

    Creates users, zones, domains, fabrics/VLANs/spaces/subnets, the
    region+rack/rack/region controllers, one machine per (non-excluded)
    node status with fake script results, pods, devices, DHCP snippets,
    and notifications.

    Fixes over the previous revision:
    * the status-exclusion filter compared `(name, value)` items against
      status values and therefore never excluded anything — MISSING,
      RESERVED, and RETIRED machines were being created;
    * typos in sample text ("removeal", "vender").
    """
    admin = factory.make_admin(
        username="******", password="******", completed_intro=False)  # noqa
    user1, _ = factory.make_user_with_keys(
        username="******", password="******", completed_intro=False)
    user2, _ = factory.make_user_with_keys(
        username="******", password="******", completed_intro=False)

    # Physical zones.
    zones = [
        factory.make_Zone(name="zone-north"),
        factory.make_Zone(name="zone-south"),
    ]

    # DNS domains.
    domains = [
        Domain.objects.get_default_domain(),
        factory.make_Domain("sample"),
        factory.make_Domain("ubnt"),
    ]

    # Create the fabrics that will be used by the regions, racks,
    # machines, and devices.
    fabric0 = Fabric.objects.get_default_fabric()
    fabric0_untagged = fabric0.get_default_vlan()
    fabric0_vlan10 = factory.make_VLAN(fabric=fabric0, vid=10)
    fabric1 = factory.make_Fabric()
    fabric1_untagged = fabric1.get_default_vlan()
    fabric1_vlan42 = factory.make_VLAN(fabric=fabric1, vid=42)
    empty_fabric = factory.make_Fabric()  # noqa

    # Create some spaces.
    space_mgmt = factory.make_Space("management")
    space_storage = factory.make_Space("storage")
    space_internal = factory.make_Space("internal")
    space_ipv6_testbed = factory.make_Space("ipv6-testbed")

    # Subnets used by regions, racks, machines, and devices.
    subnet_1 = factory.make_Subnet(
        cidr="172.16.1.0/24",
        gateway_ip="172.16.1.1",
        vlan=fabric0_untagged,
        space=space_mgmt,
    )
    subnet_2 = factory.make_Subnet(
        cidr="172.16.2.0/24",
        gateway_ip="172.16.2.1",
        vlan=fabric1_untagged,
        space=space_mgmt,
    )
    subnet_3 = factory.make_Subnet(
        cidr="172.16.3.0/24",
        gateway_ip="172.16.3.1",
        vlan=fabric0_vlan10,
        space=space_storage,
    )
    subnet_4 = factory.make_Subnet(  # noqa
        cidr="172.16.4.0/24",
        gateway_ip="172.16.4.1",
        vlan=fabric0_vlan10,
        space=space_internal,
    )
    subnet_2001_db8_42 = factory.make_Subnet(  # noqa
        cidr="2001:db8:42::/64",
        gateway_ip="",
        vlan=fabric1_vlan42,
        space=space_ipv6_testbed,
    )
    ipv4_subnets = [subnet_1, subnet_2, subnet_3, subnet_4]

    # Static routes on subnets.
    factory.make_StaticRoute(source=subnet_1, destination=subnet_2)
    factory.make_StaticRoute(source=subnet_1, destination=subnet_3)
    factory.make_StaticRoute(source=subnet_1, destination=subnet_4)
    factory.make_StaticRoute(source=subnet_2, destination=subnet_1)
    factory.make_StaticRoute(source=subnet_2, destination=subnet_3)
    factory.make_StaticRoute(source=subnet_2, destination=subnet_4)
    factory.make_StaticRoute(source=subnet_3, destination=subnet_1)
    factory.make_StaticRoute(source=subnet_3, destination=subnet_2)
    factory.make_StaticRoute(source=subnet_3, destination=subnet_4)
    factory.make_StaticRoute(source=subnet_4, destination=subnet_1)
    factory.make_StaticRoute(source=subnet_4, destination=subnet_2)
    factory.make_StaticRoute(source=subnet_4, destination=subnet_3)

    # Load builtin scripts in the database so we can generate fake results
    # below.
    load_builtin_scripts()

    hostname = gethostname()
    region_rack = get_one(
        Node.objects.filter(
            node_type=NODE_TYPE.REGION_AND_RACK_CONTROLLER,
            hostname=hostname))
    # If "make run" executes before "make sampledata", the rack may have
    # already registered.
    if region_rack is None:
        region_rack = factory.make_Node(
            node_type=NODE_TYPE.REGION_AND_RACK_CONTROLLER,
            hostname=hostname,
            interface=False,
        )

        # Get list of mac addresses that should be used for the region
        # rack controller. This will make sure the RegionAdvertisingService
        # picks the correct region on first start-up and doesn't get
        # multiple.
        mac_addresses = get_mac_addresses()

        def get_next_mac():
            # Use the host's real MACs first; fall back to random ones.
            try:
                return mac_addresses.pop()
            except IndexError:
                return factory.make_mac_address()

        # Region and rack controller (hostname of dev machine)
        #   eth0     - fabric 0 - untagged
        #   eth1     - fabric 0 - untagged
        #   eth2     - fabric 1 - untagged - 172.16.2.2/24 - static
        #   bond0    - fabric 0 - untagged - 172.16.1.2/24 - static
        #   bond0.10 - fabric 0 - 10       - 172.16.3.2/24 - static
        eth0 = factory.make_Interface(
            INTERFACE_TYPE.PHYSICAL,
            name="eth0",
            node=region_rack,
            vlan=fabric0_untagged,
            mac_address=get_next_mac(),
        )
        eth1 = factory.make_Interface(
            INTERFACE_TYPE.PHYSICAL,
            name="eth1",
            node=region_rack,
            vlan=fabric0_untagged,
            mac_address=get_next_mac(),
        )
        eth2 = factory.make_Interface(
            INTERFACE_TYPE.PHYSICAL,
            name="eth2",
            node=region_rack,
            vlan=fabric1_untagged,
            mac_address=get_next_mac(),
        )
        bond0 = factory.make_Interface(
            INTERFACE_TYPE.BOND,
            name="bond0",
            node=region_rack,
            vlan=fabric0_untagged,
            parents=[eth0, eth1],
            mac_address=eth0.mac_address,
        )
        bond0_10 = factory.make_Interface(
            INTERFACE_TYPE.VLAN,
            node=region_rack,
            vlan=fabric0_vlan10,
            parents=[bond0],
        )
        factory.make_StaticIPAddress(
            alloc_type=IPADDRESS_TYPE.STICKY,
            ip="172.16.1.2",
            subnet=subnet_1,
            interface=bond0,
        )
        factory.make_StaticIPAddress(
            alloc_type=IPADDRESS_TYPE.STICKY,
            ip="172.16.2.2",
            subnet=subnet_2,
            interface=eth2,
        )
        factory.make_StaticIPAddress(
            alloc_type=IPADDRESS_TYPE.STICKY,
            ip="172.16.3.2",
            subnet=subnet_3,
            interface=bond0_10,
        )
        fabric0_untagged.primary_rack = region_rack
        fabric0_untagged.save()
        fabric1_untagged.primary_rack = region_rack
        fabric1_untagged.save()
        fabric0_vlan10.primary_rack = region_rack
        fabric0_vlan10.save()

    # Rack controller (happy-rack)
    #   eth0     - fabric 0 - untagged
    #   eth1     - fabric 0 - untagged
    #   eth2     - fabric 1 - untagged - 172.16.2.3/24 - static
    #   bond0    - fabric 0 - untagged - 172.16.1.3/24 - static
    #   bond0.10 - fabric 0 - 10       - 172.16.3.3/24 - static
    rack = factory.make_Node(
        node_type=NODE_TYPE.RACK_CONTROLLER,
        hostname="happy-rack",
        interface=False,
    )
    eth0 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL, name="eth0", node=rack,
        vlan=fabric0_untagged)
    eth1 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL, name="eth1", node=rack,
        vlan=fabric0_untagged)
    eth2 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL, name="eth2", node=rack,
        vlan=fabric1_untagged)
    bond0 = factory.make_Interface(
        INTERFACE_TYPE.BOND,
        name="bond0",
        node=rack,
        vlan=fabric0_untagged,
        parents=[eth0, eth1],
    )
    bond0_10 = factory.make_Interface(
        INTERFACE_TYPE.VLAN, node=rack, vlan=fabric0_vlan10,
        parents=[bond0])
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.1.3",
        subnet=subnet_1,
        interface=bond0,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.2.3",
        subnet=subnet_2,
        interface=eth2,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.3.3",
        subnet=subnet_3,
        interface=bond0_10,
    )
    fabric0_untagged.secondary_rack = rack
    fabric0_untagged.save()
    fabric1_untagged.secondary_rack = rack
    fabric1_untagged.save()
    fabric0_vlan10.secondary_rack = rack
    fabric0_vlan10.save()

    # Region controller (happy-region)
    #   eth0     - fabric 0 - untagged
    #   eth1     - fabric 0 - untagged
    #   eth2     - fabric 1 - untagged - 172.16.2.4/24 - static
    #   bond0    - fabric 0 - untagged - 172.16.1.4/24 - static
    #   bond0.10 - fabric 0 - 10       - 172.16.3.4/24 - static
    region = factory.make_Node(
        node_type=NODE_TYPE.REGION_CONTROLLER,
        hostname="happy-region",
        interface=False,
    )
    eth0 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL,
        name="eth0",
        node=region,
        vlan=fabric0_untagged,
    )
    eth1 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL,
        name="eth1",
        node=region,
        vlan=fabric0_untagged,
    )
    eth2 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL,
        name="eth2",
        node=region,
        vlan=fabric1_untagged,
    )
    bond0 = factory.make_Interface(
        INTERFACE_TYPE.BOND,
        name="bond0",
        node=region,
        vlan=fabric0_untagged,
        parents=[eth0, eth1],
    )
    bond0_10 = factory.make_Interface(
        INTERFACE_TYPE.VLAN, node=region, vlan=fabric0_vlan10,
        parents=[bond0])
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.1.4",
        subnet=subnet_1,
        interface=bond0,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.2.4",
        subnet=subnet_2,
        interface=eth2,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.3.4",
        subnet=subnet_3,
        interface=bond0_10,
    )

    # Create one machine for every status. Each machine has a random
    # interface and storage configuration.
    excluded_statuses = (
        NODE_STATUS.MISSING, NODE_STATUS.RESERVED, NODE_STATUS.RETIRED)
    node_statuses = [
        (name, status)
        for name, status in map_enum(NODE_STATUS).items()
        # BUGFIX: compare the status *value*; the old code compared the
        # whole (name, value) item so the exclusions never matched.
        if status not in excluded_statuses
    ]
    machines = []
    test_scripts = [
        script.name
        for script in Script.objects.filter(script_type=SCRIPT_TYPE.TESTING)
    ]
    for _, status in node_statuses:
        owner = None
        if status in ALLOCATED_NODE_STATUSES:
            owner = random.choice([admin, user1, user2])
        elif status in [
            NODE_STATUS.COMMISSIONING,
            NODE_STATUS.FAILED_RELEASING,
        ]:
            owner = admin

        machine = factory.make_Node(
            status=status,
            owner=owner,
            zone=random.choice(zones),
            interface=False,
            with_boot_disk=False,
            power_type="manual",
            domain=random.choice(domains),
            memory=random.choice([1024, 4096, 8192]),
            description=random.choice([
                "",
                "Scheduled for removal",
                "Firmware old",
                "Earmarked for Project Fuse in April",
            ]),
            cpu_count=random.randint(2, 8),
        )
        machine.set_random_hostname()
        machines.append(machine)

        # Create random network configuration.
        RandomInterfaceFactory.create_random(machine)

        # Add random storage devices and set a random layout.
        for _ in range(random.randint(1, 5)):
            factory.make_PhysicalBlockDevice(
                node=machine,
                size=random.randint(
                    LARGE_BLOCK_DEVICE, LARGE_BLOCK_DEVICE * 10),
            )
        if status in [
            NODE_STATUS.READY,
            NODE_STATUS.ALLOCATED,
            NODE_STATUS.DEPLOYING,
            NODE_STATUS.DEPLOYED,
            NODE_STATUS.FAILED_DEPLOYMENT,
            NODE_STATUS.RELEASING,
            NODE_STATUS.FAILED_RELEASING,
        ]:
            machine.set_storage_layout(
                random.choice([
                    layout for layout in STORAGE_LAYOUTS.keys()
                    if layout != "vmfs6"
                ]))
            if status != NODE_STATUS.READY:
                machine._create_acquired_filesystems()

        # Add a random amount of events.
        for _ in range(random.randint(25, 100)):
            factory.make_Event(node=machine)

        # Add in commissioning and testing results.
        if status != NODE_STATUS.NEW:
            for _ in range(0, random.randint(1, 10)):
                css = ScriptSet.objects.create_commissioning_script_set(
                    machine)
                scripts = set()
                for __ in range(1, len(test_scripts)):
                    scripts.add(random.choice(test_scripts))
                tss = ScriptSet.objects.create_testing_script_set(
                    machine, list(scripts))
            machine.current_commissioning_script_set = css
            machine.current_testing_script_set = tss
            machine.save()

            # Fill in historic results.
            for script_set in machine.scriptset_set.all():
                if script_set in [css, tss]:
                    continue
                for script_result in script_set:
                    # Can't use script_result.store_result as it will try
                    # to process the result and fail on the fake data.
                    script_result.exit_status = random.randint(0, 255)
                    if script_result.exit_status == 0:
                        script_result.status = SCRIPT_STATUS.PASSED
                    else:
                        script_result.status = random.choice(
                            list(SCRIPT_STATUS_FAILED))
                    script_result.started = factory.make_date()
                    script_result.ended = script_result.started + timedelta(
                        seconds=random.randint(0, 10000))
                    script_result.stdout = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.stderr = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.output = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.save()

            # Only add in results in states where commissioning should be
            # completed.
            if status not in [NODE_STATUS.NEW, NODE_STATUS.COMMISSIONING]:
                if status == NODE_STATUS.FAILED_COMMISSIONING:
                    exit_status = random.randint(1, 255)
                    script_status = random.choice(
                        list(SCRIPT_STATUS_FAILED))
                else:
                    exit_status = 0
                    script_status = SCRIPT_STATUS.PASSED
                for script_result in css:
                    # Can't use script_result.store_result as it will try
                    # to process the result and fail on the fake data.
                    script_result.status = script_status
                    script_result.exit_status = exit_status
                    script_result.started = factory.make_date()
                    script_result.ended = script_result.started + timedelta(
                        seconds=random.randint(0, 10000))
                    script_result.stdout = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.stderr = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.output = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.save()
            elif status == NODE_STATUS.COMMISSIONING:
                for script_result in css:
                    script_result.status = random.choice(
                        list(SCRIPT_STATUS_RUNNING_OR_PENDING))
                    if script_result.status != SCRIPT_STATUS.PENDING:
                        script_result.started = factory.make_date()
                    script_result.save()

            # Only add in results in states where testing should be
            # completed.
            if status not in [NODE_STATUS.NEW, NODE_STATUS.TESTING]:
                if status == NODE_STATUS.FAILED_TESTING:
                    exit_status = random.randint(1, 255)
                    script_status = random.choice(
                        list(SCRIPT_STATUS_FAILED))
                else:
                    exit_status = 0
                    script_status = SCRIPT_STATUS.PASSED
                for script_result in tss:
                    # Can't use script_result.store_result as it will try
                    # to process the result and fail on the fake data.
                    script_result.status = script_status
                    script_result.exit_status = exit_status
                    script_result.started = factory.make_date()
                    script_result.ended = script_result.started + timedelta(
                        seconds=random.randint(0, 10000))
                    script_result.stdout = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.stderr = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.output = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.save()
            elif status == NODE_STATUS.TESTING:
                for script_result in tss:
                    script_result.status = random.choice(
                        list(SCRIPT_STATUS_RUNNING_OR_PENDING))
                    if script_result.status != SCRIPT_STATUS.PENDING:
                        script_result.started = factory.make_date()
                    script_result.save()

        # Add installation results.
        if status in [
            NODE_STATUS.DEPLOYING,
            NODE_STATUS.DEPLOYED,
            NODE_STATUS.FAILED_DEPLOYMENT,
        ]:
            script_set = ScriptSet.objects.create_installation_script_set(
                machine)
            machine.current_installation_script_set = script_set
            machine.save()
        if status == NODE_STATUS.DEPLOYED:
            for script_result in machine.current_installation_script_set:
                stdout = factory.make_string().encode("utf-8")
                script_result.store_result(0, stdout)
        elif status == NODE_STATUS.FAILED_DEPLOYMENT:
            for script_result in machine.current_installation_script_set:
                exit_status = random.randint(1, 255)
                stdout = factory.make_string().encode("utf-8")
                stderr = factory.make_string().encode("utf-8")
                script_result.store_result(exit_status, stdout, stderr)

        # Add children devices to the deployed machine.
        if status == NODE_STATUS.DEPLOYED:
            boot_interface = machine.get_boot_interface()
            for _ in range(5):
                device = factory.make_Device(
                    interface=True,
                    domain=machine.domain,
                    parent=machine,
                    vlan=boot_interface.vlan,
                )
                device.set_random_hostname()
                RandomInterfaceFactory.assign_ip(
                    device.get_boot_interface(),
                    alloc_type=IPADDRESS_TYPE.STICKY,
                )

    # Create a few pods and assign a random set of the machines to them.
    pods = [None]
    pod_storage_pools = defaultdict(list)
    machines_in_pods = defaultdict(list)
    for _ in range(3):
        subnet = random.choice(ipv4_subnets)
        ip = factory.pick_ip_in_Subnet(subnet)
        ip_address = factory.make_StaticIPAddress(
            alloc_type=IPADDRESS_TYPE.STICKY, ip=ip, subnet=subnet)
        power_address = "qemu+ssh://ubuntu@%s/system" % ip
        pod = factory.make_Pod(
            pod_type="virsh",
            parameters={"power_address": power_address},
            ip_address=ip_address,
            capabilities=[
                Capabilities.DYNAMIC_LOCAL_STORAGE,
                Capabilities.COMPOSABLE,
            ],
        )
        for _ in range(3):
            pool = factory.make_PodStoragePool(pod)
            pod_storage_pools[pod].append(pool)
        pod.default_storage_pool = pool
        pod.save()
        pods.append(pod)
    for _ in range(3):
        subnet = random.choice(ipv4_subnets)
        ip = factory.pick_ip_in_Subnet(subnet)
        ip_address = factory.make_StaticIPAddress(
            alloc_type=IPADDRESS_TYPE.STICKY, ip=ip, subnet=subnet)
        power_address = "%s" % ip
        pod = factory.make_Pod(
            pod_type="rsd",
            parameters={
                "power_address": power_address,
                "power_user": "******",
                "power_pass": "******",
            },
            ip_address=ip_address,
            capabilities=[
                Capabilities.DYNAMIC_LOCAL_STORAGE,
                Capabilities.COMPOSABLE,
            ],
        )
        for _ in range(3):
            pool = factory.make_PodStoragePool(pod)
            pod_storage_pools[pod].append(pool)
        pod.default_storage_pool = pool
        pod.save()
        pods.append(pod)
    for machine in machines:
        # Add the machine to the pod if it's its lucky day!
        pod = random.choice(pods)
        if pod is not None:
            machine.bmc = pod
            machine.instance_power_parameters = {
                "power_id": machine.hostname}
            machine.save()
            machines_in_pods[pod].append(machine)

            # Assign the block devices on the machine to a storage pool.
            for block_device in machine.physicalblockdevice_set.all():
                block_device.storage_pool = random.choice(
                    pod_storage_pools[pod])
                block_device.save()

    # Update the pod attributes so that it has more available than used.
    for pod in pods[1:]:
        pod.cores = pod.get_used_cores() + random.randint(4, 8)
        pod.memory = pod.get_used_memory() + random.choice(
            [1024, 2048, 4096, 4096 * 4, 4096 * 8])
        pod.local_storage = sum(
            pool.storage for pool in pod_storage_pools[pod])
        pod.save()

    # Create a few devices.
    for _ in range(10):
        device = factory.make_Device(interface=True)
        device.set_random_hostname()

    # Add some DHCP snippets.
    # - Global
    factory.make_DHCPSnippet(
        name="foo class",
        description="adds class for vendor 'foo'",
        value=VersionedTextFile.objects.create(data=dedent("""\
            class "foo" {
                match if substring (
                    option vendor-class-identifier, 0, 3) = "foo";
            }
            """)),
    )
    factory.make_DHCPSnippet(
        name="bar class",
        description="adds class for vendor 'bar'",
        value=VersionedTextFile.objects.create(data=dedent("""\
            class "bar" {
                match if substring (
                    option vendor-class-identifier, 0, 3) = "bar";
            }
            """)),
        enabled=False,
    )
    # - Subnet
    factory.make_DHCPSnippet(
        name="600 lease time",
        description="changes lease time to 600 secs.",
        value=VersionedTextFile.objects.create(
            data="default-lease-time 600;"),
        subnet=subnet_1,
    )
    factory.make_DHCPSnippet(
        name="7200 max lease time",
        description="changes max lease time to 7200 secs.",
        value=VersionedTextFile.objects.create(
            data="max-lease-time 7200;"),
        subnet=subnet_2,
        enabled=False,
    )
    # - Node
    factory.make_DHCPSnippet(
        name="boot from other server",
        description="instructs device to boot from other server",
        value=VersionedTextFile.objects.create(data=dedent("""\
            filename "test-boot";
            server-name "boot.from.me";
            """)),
        node=device,
    )

    # Add notifications for admins, users, and each individual user, and
    # for each notification category.
    factory.make_Notification(
        "Attention admins! Core critical! Meltdown imminent! Evacuate "
        "habitat immediately!",
        admins=True,
        category="error",
    )
    factory.make_Notification(
        "Dear users, rumours of a core meltdown are unfounded. Please "
        "return to your home-pods and places of business.",
        users=True,
        category="warning",
    )
    factory.make_Notification(
        "FREE! For the next 2 hours get FREE blueberry and iodine pellets "
        "at the nutri-dispensers.",
        users=True,
        category="success",
    )
    for user in User.objects.all():
        context = {"name": user.username.capitalize()}
        factory.make_Notification(
            "Greetings, {name}! Get away from the habitat for the weekend "
            "and visit the Mare Nubium with MAAS Tours. Use the code METAL "
            "to claim a special gift!",
            user=user,
            context=context,
            category="info",
        )
self.node.acquire(self.user, token=None) try: self.node.start(self.user) except StaticIPAddressExhaustion: raise NodeActionError( "%s: Failed to start, static IP addresses are exhausted." % self.node.hostname) except RPC_EXCEPTIONS + (ExternalProcessError,) as exception: raise NodeActionError(exception) else: return "This node has been asked to start up." FAILED_STATUSES = [ status for status in map_enum(NODE_STATUS).values() if is_failed_status(status) ] class StopNode(NodeAction): """Stop a node.""" name = "stop" display = "Stop node" display_bulk = "Stop selected nodes" actionable_statuses = ( [NODE_STATUS.DEPLOYED, NODE_STATUS.READY] + # Also let a user ask a failed node to shutdown: this # is useful to try to recover from power failures. FAILED_STATUSES )
def test_map_enum_maps_values(self):
    """`map_enum` returns a name -> value dict of the enum's attributes."""
    class Enum:
        ONE = 1
        THREE = 3

    expected = {"ONE": 1, "THREE": 3}
    self.assertEqual(expected, map_enum(Enum))
def test_map_enum_includes_all_enum_keys(self):
    """Every public attribute name appears among the mapping's keys."""
    class Enum:
        ONE = 1
        TWO = 2

    mapping = map_enum(Enum)
    self.assertItemsEqual(["ONE", "TWO"], mapping.keys())
def test_map_enum_maps_values(self):
    """`map_enum` returns a name -> value dict of the enum's attributes."""
    class Enum:
        ONE = 1
        THREE = 3

    mapping = map_enum(Enum)
    self.assertEqual({'ONE': 1, 'THREE': 3}, mapping)
return self.audit_description % action.node.hostname def _execute(self): """See `NodeAction.execute`.""" try: self.node.start(self.user) except StaticIPAddressExhaustion: raise NodeActionError( "%s: Failed to start, static IP addresses are exhausted." % self.node.hostname) except RPC_EXCEPTIONS + (ExternalProcessError, ) as exception: raise NodeActionError(exception) FAILED_STATUSES = [ status for status in map_enum(NODE_STATUS).values() if is_failed_status(status) ] class PowerOff(NodeAction): """Power off a node.""" name = "off" display = "Power off..." display_sentence = "powered off" # Let a user power off a node in any non-active status. actionable_statuses = NON_MONITORED_STATUSES permission = NodePermission.edit for_type = {NODE_TYPE.MACHINE, NODE_TYPE.RACK_CONTROLLER} action_type = NODE_ACTION_TYPE.POWER audit_description = "Powered off '%s'."
NODE_FAILURE_MONITORED_STATUS_TIMEOUTS = { NODE_STATUS.COMMISSIONING: 20, NODE_STATUS.DEPLOYING: 40, NODE_STATUS.RELEASING: 5, NODE_STATUS.ENTERING_RESCUE_MODE: 20, NODE_STATUS.EXITING_RESCUE_MODE: 5, NODE_STATUS.TESTING: 20, } # Statuses that correspond to managed steps for which MAAS actively # monitors that the status changes after a fixed period of time. MONITORED_STATUSES = list(NODE_FAILURE_STATUS_TRANSITIONS.keys()) # Non-active statuses. NON_MONITORED_STATUSES = set(map_enum(NODE_STATUS).values()).difference( set(MONITORED_STATUSES)) FAILED_STATUSES = list(NODE_FAILURE_STATUS_TRANSITIONS.values()) # Statuses that are like commissioning, in that we boot an # an ephemeral environment of the latest LTS, run some scripts # provided via user data, and report back success/fail status. COMMISSIONING_LIKE_STATUSES = [ NODE_STATUS.NEW, NODE_STATUS.COMMISSIONING, NODE_STATUS.DISK_ERASING, NODE_STATUS.ENTERING_RESCUE_MODE, NODE_STATUS.RESCUE_MODE, NODE_STATUS.TESTING, ]