def save_file(self, filename, file_object, owner):
    """Store a file's contents in the database.

    Any existing file with the same name/owner combination is
    replaced by the new contents.
    """
    # Reading the whole file at once is acceptable here: large files
    # are not expected, so chunked reading would add complexity for
    # no practical gain.
    new_content = Bin(file_object.read())
    storage, created = self.get_or_create(
        filename=filename, owner=owner,
        defaults={'content': new_content})
    if created:
        return storage
    # Row already existed: overwrite it with the new contents.
    storage.content = new_content
    storage.save()
    return storage
def create(self, request):
    """Create a new commissioning script.

    Each commissioning script is identified by a unique name.

    By convention the name should consist of a two-digit number, a dash,
    and a brief descriptive identifier consisting only of ASCII
    characters.  You don't need to follow this convention, but not doing
    so opens you up to risks w.r.t. encoding and ordering.  The name must
    not contain any whitespace, quotes, or apostrophes.

    A commissioning machine will run each of the scripts in
    lexicographical order.  There are no promises about how non-ASCII
    characters are sorted, or even how upper-case letters are sorted
    relative to lower-case letters.  So where ordering matters, use
    unique numbers.

    Scripts built into MAAS will have names starting with "00-maas" or
    "99-maas" to ensure that they run first or last, respectively.

    Usually a commissioning script will be just that, a script.  Ideally
    a script should be ASCII text to avoid any confusion over encoding.
    But in some cases a commissioning script might consist of a binary
    tool provided by a hardware vendor.  Either way, the script gets
    passed to the commissioning machine in the exact form in which it
    was uploaded.

    :param name: Unique identifying name for the script.  Names should
        follow the pattern of "25-burn-in-hard-disk" (all ASCII, and
        with numbers greater than zero, and generally no "weird"
        characters).
    :param content: A script file, to be uploaded in binary form.  Note:
        this is not a normal parameter, but a file upload.  Its filename
        is ignored; MAAS will know it by the name you pass to the
        request.
    """
    content = Bin(get_content_parameter(request))
    data = request.data.copy()
    data['script'] = content
    data['script_type'] = SCRIPT_TYPE.COMMISSIONING
    form = ScriptForm(data=data)
    if form.is_valid():
        script = form.save(request)
        return {
            'name': script.name,
            'content': b64encode(script.script.data.encode()),
            'resource_uri': reverse(
                'commissioning_script_handler', args=[script.name]),
        }
    else:
        # BUG FIX: the validation error must be *raised*, not returned.
        # Returning the exception object would serialize it as the
        # handler's (apparently successful) response instead of
        # producing the intended HTTP 400 validation failure.
        raise MAASAPIValidationError(form.errors)
def make_node_commission_result(self, node=None, name=None, script_result=None, data=None):
    """Build and save a `NodeCommissionResult`, generating any missing values.

    Each argument left as `None` is replaced by randomly generated
    test data before the record is created.
    """
    node = self.make_node() if node is None else node
    if name is None:
        name = "ncrname-" + self.getRandomString(92)
    if data is None:
        data = b"ncrdata-" + self.getRandomBytes()
    if script_result is None:
        script_result = random.randint(0, 10)
    record = NodeCommissionResult(
        node=node,
        name=name,
        script_result=script_result,
        data=Bin(data),
    )
    record.save()
    return record
def populate_main():
    """Populate the main data all in one transaction.

    Creates sample users, zones, DNS domains, fabrics/VLANs/spaces/subnets,
    region and rack controllers, machines in (almost) every status with fake
    commissioning/testing/installation results, pods, devices, DHCP
    snippets, and notifications.
    """
    # One admin plus two regular users (the latter with SSH keys attached).
    admin = factory.make_admin(username="******", password="******", completed_intro=False)  # noqa
    user1, _ = factory.make_user_with_keys(username="******", password="******", completed_intro=False)
    user2, _ = factory.make_user_with_keys(username="******", password="******", completed_intro=False)

    # Physical zones.
    zones = [
        factory.make_Zone(name="zone-north"),
        factory.make_Zone(name="zone-south"),
    ]

    # DNS domains.
    domains = [
        Domain.objects.get_default_domain(),
        factory.make_Domain("sample"),
        factory.make_Domain("ubnt"),
    ]

    # Create the fabrics that will be used by the regions, racks,
    # machines, and devices.
    fabric0 = Fabric.objects.get_default_fabric()
    fabric0_untagged = fabric0.get_default_vlan()
    fabric0_vlan10 = factory.make_VLAN(fabric=fabric0, vid=10)
    fabric1 = factory.make_Fabric()
    fabric1_untagged = fabric1.get_default_vlan()
    fabric1_vlan42 = factory.make_VLAN(fabric=fabric1, vid=42)
    empty_fabric = factory.make_Fabric()  # noqa

    # Create some spaces.
    space_mgmt = factory.make_Space("management")
    space_storage = factory.make_Space("storage")
    space_internal = factory.make_Space("internal")
    space_ipv6_testbed = factory.make_Space("ipv6-testbed")

    # Subnets used by regions, racks, machines, and devices.
    subnet_1 = factory.make_Subnet(
        cidr="172.16.1.0/24",
        gateway_ip="172.16.1.1",
        vlan=fabric0_untagged,
        space=space_mgmt,
    )
    subnet_2 = factory.make_Subnet(
        cidr="172.16.2.0/24",
        gateway_ip="172.16.2.1",
        vlan=fabric1_untagged,
        space=space_mgmt,
    )
    subnet_3 = factory.make_Subnet(
        cidr="172.16.3.0/24",
        gateway_ip="172.16.3.1",
        vlan=fabric0_vlan10,
        space=space_storage,
    )
    subnet_4 = factory.make_Subnet(  # noqa
        cidr="172.16.4.0/24",
        gateway_ip="172.16.4.1",
        vlan=fabric0_vlan10,
        space=space_internal,
    )
    subnet_2001_db8_42 = factory.make_Subnet(  # noqa
        cidr="2001:db8:42::/64",
        gateway_ip="",
        vlan=fabric1_vlan42,
        space=space_ipv6_testbed,
    )
    ipv4_subnets = [subnet_1, subnet_2, subnet_3, subnet_4]

    # Static routes on subnets: a full mesh among the four IPv4 subnets.
    factory.make_StaticRoute(source=subnet_1, destination=subnet_2)
    factory.make_StaticRoute(source=subnet_1, destination=subnet_3)
    factory.make_StaticRoute(source=subnet_1, destination=subnet_4)
    factory.make_StaticRoute(source=subnet_2, destination=subnet_1)
    factory.make_StaticRoute(source=subnet_2, destination=subnet_3)
    factory.make_StaticRoute(source=subnet_2, destination=subnet_4)
    factory.make_StaticRoute(source=subnet_3, destination=subnet_1)
    factory.make_StaticRoute(source=subnet_3, destination=subnet_2)
    factory.make_StaticRoute(source=subnet_3, destination=subnet_4)
    factory.make_StaticRoute(source=subnet_4, destination=subnet_1)
    factory.make_StaticRoute(source=subnet_4, destination=subnet_2)
    factory.make_StaticRoute(source=subnet_4, destination=subnet_3)

    # Load builtin scripts in the database so we can generate fake results
    # below.
    load_builtin_scripts()

    hostname = gethostname()
    region_rack = get_one(
        Node.objects.filter(node_type=NODE_TYPE.REGION_AND_RACK_CONTROLLER, hostname=hostname))
    # If "make run" executes before "make sampledata", the rack may have
    # already registered.
    if region_rack is None:
        region_rack = factory.make_Node(
            node_type=NODE_TYPE.REGION_AND_RACK_CONTROLLER,
            hostname=hostname,
            interface=False,
        )

    # Get list of mac addresses that should be used for the region
    # rack controller. This will make sure the RegionAdvertisingService
    # picks the correct region on first start-up and doesn't get multiple.
    mac_addresses = get_mac_addresses()

    def get_next_mac():
        # Consume a real MAC when one remains; fall back to a fake one.
        try:
            return mac_addresses.pop()
        except IndexError:
            return factory.make_mac_address()

    # Region and rack controller (hostname of dev machine)
    #   eth0     - fabric 0 - untagged
    #   eth1     - fabric 0 - untagged
    #   eth2     - fabric 1 - untagged - 172.16.2.2/24 - static
    #   bond0    - fabric 0 - untagged - 172.16.1.2/24 - static
    #   bond0.10 - fabric 0 - 10       - 172.16.3.2/24 - static
    eth0 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL,
        name="eth0",
        node=region_rack,
        vlan=fabric0_untagged,
        mac_address=get_next_mac(),
    )
    eth1 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL,
        name="eth1",
        node=region_rack,
        vlan=fabric0_untagged,
        mac_address=get_next_mac(),
    )
    eth2 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL,
        name="eth2",
        node=region_rack,
        vlan=fabric1_untagged,
        mac_address=get_next_mac(),
    )
    bond0 = factory.make_Interface(
        INTERFACE_TYPE.BOND,
        name="bond0",
        node=region_rack,
        vlan=fabric0_untagged,
        parents=[eth0, eth1],
        mac_address=eth0.mac_address,
    )
    bond0_10 = factory.make_Interface(
        INTERFACE_TYPE.VLAN,
        node=region_rack,
        vlan=fabric0_vlan10,
        parents=[bond0],
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.1.2",
        subnet=subnet_1,
        interface=bond0,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.2.2",
        subnet=subnet_2,
        interface=eth2,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.3.2",
        subnet=subnet_3,
        interface=bond0_10,
    )
    # The region+rack controller serves DHCP as primary on these VLANs.
    fabric0_untagged.primary_rack = region_rack
    fabric0_untagged.save()
    fabric1_untagged.primary_rack = region_rack
    fabric1_untagged.save()
    fabric0_vlan10.primary_rack = region_rack
    fabric0_vlan10.save()

    # Rack controller (happy-rack)
    #   eth0     - fabric 0 - untagged
    #   eth1     - fabric 0 - untagged
    #   eth2     - fabric 1 - untagged - 172.16.2.3/24 - static
    #   bond0    - fabric 0 - untagged - 172.16.1.3/24 - static
    #   bond0.10 - fabric 0 - 10       - 172.16.3.3/24 - static
    rack = factory.make_Node(
        node_type=NODE_TYPE.RACK_CONTROLLER,
        hostname="happy-rack",
        interface=False,
    )
    eth0 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, name="eth0", node=rack, vlan=fabric0_untagged)
    eth1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, name="eth1", node=rack, vlan=fabric0_untagged)
    eth2 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL, name="eth2", node=rack, vlan=fabric1_untagged)
    bond0 = factory.make_Interface(
        INTERFACE_TYPE.BOND,
        name="bond0",
        node=rack,
        vlan=fabric0_untagged,
        parents=[eth0, eth1],
    )
    bond0_10 = factory.make_Interface(INTERFACE_TYPE.VLAN, node=rack, vlan=fabric0_vlan10, parents=[bond0])
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.1.3",
        subnet=subnet_1,
        interface=bond0,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.2.3",
        subnet=subnet_2,
        interface=eth2,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.3.3",
        subnet=subnet_3,
        interface=bond0_10,
    )
    # This rack backs up DHCP on the same VLANs.
    fabric0_untagged.secondary_rack = rack
    fabric0_untagged.save()
    fabric1_untagged.secondary_rack = rack
    fabric1_untagged.save()
    fabric0_vlan10.secondary_rack = rack
    fabric0_vlan10.save()

    # Region controller (happy-region)
    #   eth0     - fabric 0 - untagged
    #   eth1     - fabric 0 - untagged
    #   eth2     - fabric 1 - untagged - 172.16.2.4/24 - static
    #   bond0    - fabric 0 - untagged - 172.16.1.4/24 - static
    #   bond0.10 - fabric 0 - 10       - 172.16.3.4/24 - static
    region = factory.make_Node(
        node_type=NODE_TYPE.REGION_CONTROLLER,
        hostname="happy-region",
        interface=False,
    )
    eth0 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL,
        name="eth0",
        node=region,
        vlan=fabric0_untagged,
    )
    eth1 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL,
        name="eth1",
        node=region,
        vlan=fabric0_untagged,
    )
    eth2 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL,
        name="eth2",
        node=region,
        vlan=fabric1_untagged,
    )
    bond0 = factory.make_Interface(
        INTERFACE_TYPE.BOND,
        name="bond0",
        node=region,
        vlan=fabric0_untagged,
        parents=[eth0, eth1],
    )
    bond0_10 = factory.make_Interface(INTERFACE_TYPE.VLAN, node=region, vlan=fabric0_vlan10, parents=[bond0])
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.1.4",
        subnet=subnet_1,
        interface=bond0,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.2.4",
        subnet=subnet_2,
        interface=eth2,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.3.4",
        subnet=subnet_3,
        interface=bond0_10,
    )

    # Create one machine for every status. Each machine has a random interface
    # and storage configration.
    node_statuses = [
        status for status in map_enum(NODE_STATUS).items()
        if status not in [NODE_STATUS.MISSING, NODE_STATUS.RESERVED, NODE_STATUS.RETIRED]
    ]
    machines = []
    test_scripts = [
        script.name
        for script in Script.objects.filter(script_type=SCRIPT_TYPE.TESTING)
    ]
    for _, status in node_statuses:
        # Allocated-style statuses get a random owner; a couple of
        # transitional statuses are pinned to the admin.
        owner = None
        if status in ALLOCATED_NODE_STATUSES:
            owner = random.choice([admin, user1, user2])
        elif status in [
            NODE_STATUS.COMMISSIONING,
            NODE_STATUS.FAILED_RELEASING,
        ]:
            owner = admin
        machine = factory.make_Node(
            status=status,
            owner=owner,
            zone=random.choice(zones),
            interface=False,
            with_boot_disk=False,
            power_type="manual",
            domain=random.choice(domains),
            memory=random.choice([1024, 4096, 8192]),
            description=random.choice([
                "",
                "Scheduled for removeal",
                "Firmware old",
                "Earmarked for Project Fuse in April",
            ]),
            cpu_count=random.randint(2, 8),
        )
        machine.set_random_hostname()
        machines.append(machine)

        # Create random network configuration.
        RandomInterfaceFactory.create_random(machine)

        # Add random storage devices and set a random layout.
        for _ in range(random.randint(1, 5)):
            factory.make_PhysicalBlockDevice(
                node=machine,
                size=random.randint(LARGE_BLOCK_DEVICE, LARGE_BLOCK_DEVICE * 10),
            )
        if status in [
            NODE_STATUS.READY,
            NODE_STATUS.ALLOCATED,
            NODE_STATUS.DEPLOYING,
            NODE_STATUS.DEPLOYED,
            NODE_STATUS.FAILED_DEPLOYMENT,
            NODE_STATUS.RELEASING,
            NODE_STATUS.FAILED_RELEASING,
        ]:
            machine.set_storage_layout(
                random.choice([
                    layout
                    for layout in STORAGE_LAYOUTS.keys()
                    if layout != "vmfs6"
                ]))
            if status != NODE_STATUS.READY:
                machine._create_acquired_filesystems()

        # Add a random amount of events.
        for _ in range(random.randint(25, 100)):
            factory.make_Event(node=machine)

        # Add in commissioning and testing results.
        if status != NODE_STATUS.NEW:
            # Build several script sets; the last pair created becomes the
            # machine's "current" commissioning/testing set below.
            for _ in range(0, random.randint(1, 10)):
                css = ScriptSet.objects.create_commissioning_script_set(
                    machine)
                scripts = set()
                for __ in range(1, len(test_scripts)):
                    scripts.add(random.choice(test_scripts))
                tss = ScriptSet.objects.create_testing_script_set(
                    machine, list(scripts))
            machine.current_commissioning_script_set = css
            machine.current_testing_script_set = tss
            machine.save()

            # Fill in historic results
            for script_set in machine.scriptset_set.all():
                if script_set in [css, tss]:
                    continue
                for script_result in script_set:
                    # Can't use script_result.store_result as it will try to
                    # process the result and fail on the fake data.
                    script_result.exit_status = random.randint(0, 255)
                    if script_result.exit_status == 0:
                        script_result.status = SCRIPT_STATUS.PASSED
                    else:
                        script_result.status = random.choice(
                            list(SCRIPT_STATUS_FAILED))
                    script_result.started = factory.make_date()
                    script_result.ended = script_result.started + timedelta(
                        seconds=random.randint(0, 10000))
                    script_result.stdout = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.stderr = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.output = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.save()

            # Only add in results in states where commissioning should be
            # completed.
            if status not in [NODE_STATUS.NEW, NODE_STATUS.COMMISSIONING]:
                if status == NODE_STATUS.FAILED_COMMISSIONING:
                    exit_status = random.randint(1, 255)
                    script_status = random.choice(list(SCRIPT_STATUS_FAILED))
                else:
                    exit_status = 0
                    script_status = SCRIPT_STATUS.PASSED
                for script_result in css:
                    # Can't use script_result.store_result as it will try to
                    # process the result and fail on the fake data.
                    script_result.status = script_status
                    script_result.exit_status = exit_status
                    script_result.started = factory.make_date()
                    script_result.ended = script_result.started + timedelta(
                        seconds=random.randint(0, 10000))
                    script_result.stdout = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.stderr = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.output = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.save()
            elif status == NODE_STATUS.COMMISSIONING:
                # Commissioning is in flight: scripts are pending/running.
                for script_result in css:
                    script_result.status = random.choice(
                        list(SCRIPT_STATUS_RUNNING_OR_PENDING))
                    if script_result.status != SCRIPT_STATUS.PENDING:
                        script_result.started = factory.make_date()
                    script_result.save()

            # Only add in results in states where testing should be
            # completed.
            if status not in [NODE_STATUS.NEW, NODE_STATUS.TESTING]:
                if status == NODE_STATUS.FAILED_TESTING:
                    exit_status = random.randint(1, 255)
                    script_status = random.choice(list(SCRIPT_STATUS_FAILED))
                else:
                    exit_status = 0
                    script_status = SCRIPT_STATUS.PASSED
                for script_result in tss:
                    # Can't use script_result.store_result as it will try to
                    # process the result and fail on the fake data.
                    script_result.status = script_status
                    script_result.exit_status = exit_status
                    script_result.started = factory.make_date()
                    script_result.ended = script_result.started + timedelta(
                        seconds=random.randint(0, 10000))
                    script_result.stdout = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.stderr = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.output = Bin(
                        factory.make_string().encode("utf-8"))
                    script_result.save()
            elif status == NODE_STATUS.TESTING:
                # Testing is in flight: scripts are pending/running.
                for script_result in tss:
                    script_result.status = random.choice(
                        list(SCRIPT_STATUS_RUNNING_OR_PENDING))
                    if script_result.status != SCRIPT_STATUS.PENDING:
                        script_result.started = factory.make_date()
                    script_result.save()

        # Add installation results.
        if status in [
            NODE_STATUS.DEPLOYING,
            NODE_STATUS.DEPLOYED,
            NODE_STATUS.FAILED_DEPLOYMENT,
        ]:
            script_set = ScriptSet.objects.create_installation_script_set(
                machine)
            machine.current_installation_script_set = script_set
            machine.save()
        if status == NODE_STATUS.DEPLOYED:
            for script_result in machine.current_installation_script_set:
                stdout = factory.make_string().encode("utf-8")
                script_result.store_result(0, stdout)
        elif status == NODE_STATUS.FAILED_DEPLOYMENT:
            for script_result in machine.current_installation_script_set:
                exit_status = random.randint(1, 255)
                stdout = factory.make_string().encode("utf-8")
                stderr = factory.make_string().encode("utf-8")
                script_result.store_result(exit_status, stdout, stderr)

        # Add children devices to the deployed machine.
        if status == NODE_STATUS.DEPLOYED:
            boot_interface = machine.get_boot_interface()
            for _ in range(5):
                device = factory.make_Device(
                    interface=True,
                    domain=machine.domain,
                    parent=machine,
                    vlan=boot_interface.vlan,
                )
                device.set_random_hostname()
                RandomInterfaceFactory.assign_ip(
                    device.get_boot_interface(),
                    alloc_type=IPADDRESS_TYPE.STICKY,
                )

    # Create a few pods and assign a random set of the machines to the pods.
    pods = [None]  # None means "not in any pod" when chosen below.
    pod_storage_pools = defaultdict(list)
    machines_in_pods = defaultdict(list)
    for _ in range(3):
        subnet = random.choice(ipv4_subnets)
        ip = factory.pick_ip_in_Subnet(subnet)
        ip_address = factory.make_StaticIPAddress(
            alloc_type=IPADDRESS_TYPE.STICKY, ip=ip, subnet=subnet)
        power_address = "qemu+ssh://ubuntu@%s/system" % ip
        pod = factory.make_Pod(
            pod_type="virsh",
            parameters={"power_address": power_address},
            ip_address=ip_address,
            capabilities=[
                Capabilities.DYNAMIC_LOCAL_STORAGE,
                Capabilities.COMPOSABLE,
            ],
        )
        for _ in range(3):
            pool = factory.make_PodStoragePool(pod)
            pod_storage_pools[pod].append(pool)
        pod.default_storage_pool = pool
        pod.save()
        pods.append(pod)
    for _ in range(3):
        subnet = random.choice(ipv4_subnets)
        ip = factory.pick_ip_in_Subnet(subnet)
        ip_address = factory.make_StaticIPAddress(
            alloc_type=IPADDRESS_TYPE.STICKY, ip=ip, subnet=subnet)
        power_address = "%s" % ip
        pod = factory.make_Pod(
            pod_type="rsd",
            parameters={
                "power_address": power_address,
                "power_user": "******",
                "power_pass": "******",
            },
            ip_address=ip_address,
            capabilities=[
                Capabilities.DYNAMIC_LOCAL_STORAGE,
                Capabilities.COMPOSABLE,
            ],
        )
        for _ in range(3):
            pool = factory.make_PodStoragePool(pod)
            pod_storage_pools[pod].append(pool)
        pod.default_storage_pool = pool
        pod.save()
        pods.append(pod)
    for machine in machines:
        # Add the machine to the pod if its lucky day!
        pod = random.choice(pods)
        if pod is not None:
            machine.bmc = pod
            machine.instance_power_parameters = {"power_id": machine.hostname}
            machine.save()
            machines_in_pods[pod].append(machine)

            # Assign the block devices on the machine to a storage pool.
            for block_device in machine.physicalblockdevice_set.all():
                block_device.storage_pool = random.choice(
                    pod_storage_pools[pod])
                block_device.save()

    # Update the pod attributes so that it has more available than used.
    for pod in pods[1:]:
        pod.cores = pod.get_used_cores() + random.randint(4, 8)
        pod.memory = pod.get_used_memory() + random.choice(
            [1024, 2048, 4096, 4096 * 4, 4096 * 8])
        pod.local_storage = sum(
            pool.storage for pool in pod_storage_pools[pod])
        pod.save()

    # Create a few devices.
    for _ in range(10):
        device = factory.make_Device(interface=True)
        device.set_random_hostname()

    # Add some DHCP snippets.
    # - Global
    factory.make_DHCPSnippet(
        name="foo class",
        description="adds class for vender 'foo'",
        value=VersionedTextFile.objects.create(data=dedent("""\
            class "foo" {
                match if substring (
                    option vendor-class-identifier, 0, 3) = "foo";
            }
        """)),
    )
    factory.make_DHCPSnippet(
        name="bar class",
        description="adds class for vender 'bar'",
        value=VersionedTextFile.objects.create(data=dedent("""\
            class "bar" {
                match if substring (
                    option vendor-class-identifier, 0, 3) = "bar";
            }
        """)),
        enabled=False,
    )
    # - Subnet
    factory.make_DHCPSnippet(
        name="600 lease time",
        description="changes lease time to 600 secs.",
        value=VersionedTextFile.objects.create(data="default-lease-time 600;"),
        subnet=subnet_1,
    )
    factory.make_DHCPSnippet(
        name="7200 max lease time",
        description="changes max lease time to 7200 secs.",
        value=VersionedTextFile.objects.create(data="max-lease-time 7200;"),
        subnet=subnet_2,
        enabled=False,
    )
    # - Node
    factory.make_DHCPSnippet(
        name="boot from other server",
        description="instructs device to boot from other server",
        value=VersionedTextFile.objects.create(data=dedent("""\
            filename "test-boot";
            server-name "boot.from.me";
        """)),
        node=device,
    )

    # Add notifications for admins, users, and each individual user, and for
    # each notification category.
    factory.make_Notification(
        "Attention admins! Core critical! Meltdown imminent! Evacuate "
        "habitat immediately!",
        admins=True,
        category="error",
    )
    factory.make_Notification(
        "Dear users, rumours of a core meltdown are unfounded. Please "
        "return to your home-pods and places of business.",
        users=True,
        category="warning",
    )
    factory.make_Notification(
        "FREE! For the next 2 hours get FREE blueberry and iodine pellets "
        "at the nutri-dispensers.",
        users=True,
        category="success",
    )
    for user in User.objects.all():
        context = {"name": user.username.capitalize()}
        factory.make_Notification(
            "Greetings, {name}! Get away from the habitat for the weekend and "
            "visit the Mare Nubium with MAAS Tours. Use the code METAL to "
            "claim a special gift!",
            user=user,
            context=context,
            category="info",
        )
def store_result(self, exit_status=None, output=None, stdout=None, stderr=None, result=None, script_version_id=None, timedout=False):
    """Store uploaded script output and derive this result's status.

    :param exit_status: The script's exit code; 0 marks it PASSED,
        non-zero FAILED (or FAILED_INSTALLING when it was INSTALLING).
    :param output: Combined output, stored verbatim as binary.
    :param stdout: Standard output, stored verbatim as binary.
    :param stderr: Standard error, stored verbatim as binary.
    :param result: Structured result data; parsed (as YAML) and its
        'status' field, when present, overrides the derived status.
    :param script_version_id: Id of the script version that ran; the
        latest version is assumed when not given.
    :param timedout: When True the status is forced to TIMEDOUT and
        exit_status is ignored.
    """
    # Don't allow ScriptResults to be overwritten unless the node is a
    # controller. Controllers are allowed to overwrite their results to
    # prevent new ScriptSets being created everytime a controller starts.
    # This also allows us to avoid creating an RPC call for the rack
    # controller to create a new ScriptSet.
    if not self.script_set.node.is_controller:
        # Allow PENDING, INSTALLING, and RUNNING scripts incase the node
        # didn't inform MAAS the Script was being run, it just uploaded
        # results.
        assert self.status in (SCRIPT_STATUS.PENDING, SCRIPT_STATUS.INSTALLING, SCRIPT_STATUS.RUNNING)
        assert self.output == b''
        assert self.stdout == b''
        assert self.stderr == b''
        assert self.result == b''
        assert self.script_version is None
    if timedout:
        self.status = SCRIPT_STATUS.TIMEDOUT
    elif exit_status is not None:
        self.exit_status = exit_status
        if exit_status == 0:
            self.status = SCRIPT_STATUS.PASSED
        elif self.status == SCRIPT_STATUS.INSTALLING:
            self.status = SCRIPT_STATUS.FAILED_INSTALLING
        else:
            self.status = SCRIPT_STATUS.FAILED
    if output is not None:
        self.output = Bin(output)
    if stdout is not None:
        self.stdout = Bin(stdout)
    if stderr is not None:
        self.stderr = Bin(stderr)
    if result is not None:
        self.result = Bin(result)
        try:
            parsed_yaml = self.read_results()
        except ValidationError as err:
            # Bad YAML is logged and recorded as a node event, but does
            # not abort storing the rest of the result.
            err_msg = (
                "%s(%s) sent a script result with invalid YAML: %s" %
                (self.script_set.node.fqdn,
                 self.script_set.node.system_id,
                 err.message))
            logger.error(err_msg)
            Event.objects.create_node_event(
                system_id=self.script_set.node.system_id,
                event_type=EVENT_TYPES.SCRIPT_RESULT_ERROR,
                event_description=err_msg)
        else:
            # A status given in the result YAML wins over the status
            # derived from exit_status/timedout above.
            status = parsed_yaml.get('status')
            if status == 'passed':
                self.status = SCRIPT_STATUS.PASSED
            elif status == 'failed':
                self.status = SCRIPT_STATUS.FAILED
            elif status == 'degraded':
                self.status = SCRIPT_STATUS.DEGRADED
            elif status == 'timedout':
                self.status = SCRIPT_STATUS.TIMEDOUT
    if self.script:
        if script_version_id is not None:
            # Look the reported version up among this script's previous
            # versions; an unknown id is logged but not fatal.
            for script in self.script.script.previous_versions():
                if script.id == script_version_id:
                    self.script_version = script
                    break
            if self.script_version is None:
                err_msg = (
                    "%s(%s) sent a script result for %s(%d) with an "
                    "unknown script version(%d)." % (self.script_set.node.fqdn,
                                                     self.script_set.node.system_id,
                                                     self.script.name,
                                                     self.script.id,
                                                     script_version_id))
                logger.error(err_msg)
                Event.objects.create_node_event(
                    system_id=self.script_set.node.system_id,
                    event_type=EVENT_TYPES.SCRIPT_RESULT_ERROR,
                    event_description=err_msg)
        else:
            # If no script version was given assume the latest version
            # was run.
            self.script_version = self.script.script
    # If commissioning result check if its a builtin script, if so run its
    # hook before committing to the database.
    if (self.script_set.result_type == RESULT_TYPE.COMMISSIONING and
            self.name in NODE_INFO_SCRIPTS):
        post_process_hook = NODE_INFO_SCRIPTS[self.name]['hook']
        err = ("%s(%s): commissioning script '%s' failed during "
               "post-processing." % (self.script_set.node.fqdn,
                                     self.script_set.node.system_id,
                                     self.name))
        # Circular imports.
        from metadataserver.api import try_or_log_event
        try_or_log_event(self.script_set.node, None, err, post_process_hook,
                         node=self.script_set.node, output=self.stdout,
                         exit_status=self.exit_status)
    self.save()
def store_result(
    self,
    exit_status=None,
    output=None,
    stdout=None,
    stderr=None,
    result=None,
    script_version_id=None,
    timedout=False,
):
    """Store uploaded script output and derive this result's status.

    :param exit_status: The script's exit code; 0 marks it PASSED,
        non-zero FAILED (or FAILED_INSTALLING / FAILED_APPLYING_NETCONF
        depending on the current status).
    :param output: Combined output, stored verbatim as binary.
    :param stdout: Standard output, stored verbatim as binary.
    :param stderr: Standard error, stored verbatim as binary.
    :param result: Structured result data; parsed (as YAML) and its
        'status' / 'link_connected' fields, when present, override the
        derived status and update the associated interface.
    :param script_version_id: Id of the script version that ran; the
        latest version is assumed when not given.
    :param timedout: When True the status is forced to TIMEDOUT and
        exit_status is ignored.
    """
    # Controllers and Pods are allowed to overwrite their results during
    # any status to prevent new ScriptSets being created everytime a
    # controller starts. This also allows us to avoid creating an RPC
    # call for the rack controller to create a new ScriptSet.
    if (not self.script_set.node.is_controller
            and not self.script_set.node.is_pod):
        # Allow PENDING, APPLYING_NETCONF, INSTALLING, and RUNNING scripts
        # incase the node didn't inform MAAS the Script was being run, it
        # just uploaded results.
        assert self.status in SCRIPT_STATUS_RUNNING_OR_PENDING
    if timedout:
        self.status = SCRIPT_STATUS.TIMEDOUT
    elif exit_status is not None:
        self.exit_status = exit_status
        if exit_status == 0:
            self.status = SCRIPT_STATUS.PASSED
        elif self.status == SCRIPT_STATUS.INSTALLING:
            self.status = SCRIPT_STATUS.FAILED_INSTALLING
        elif self.status == SCRIPT_STATUS.APPLYING_NETCONF:
            self.status = SCRIPT_STATUS.FAILED_APPLYING_NETCONF
        else:
            self.status = SCRIPT_STATUS.FAILED
    if output is not None:
        self.output = Bin(output)
    if stdout is not None:
        self.stdout = Bin(stdout)
    if stderr is not None:
        self.stderr = Bin(stderr)
    if result is not None:
        self.result = Bin(result)
        try:
            parsed_yaml = self.read_results()
        except ValidationError as err:
            # Bad YAML is logged and recorded as a node event, but does
            # not abort storing the rest of the result.
            err_msg = (
                "%s(%s) sent a script result with invalid YAML: %s" % (
                    self.script_set.node.fqdn,
                    self.script_set.node.system_id,
                    err.message,
                ))
            logger.error(err_msg)
            Event.objects.create_node_event(
                system_id=self.script_set.node.system_id,
                event_type=EVENT_TYPES.SCRIPT_RESULT_ERROR,
                event_description=err_msg,
            )
        else:
            # A status given in the result YAML wins over the status
            # derived from exit_status/timedout above.
            status = parsed_yaml.get("status")
            if status == "passed":
                self.status = SCRIPT_STATUS.PASSED
            elif status == "failed":
                self.status = SCRIPT_STATUS.FAILED
            elif status == "degraded":
                self.status = SCRIPT_STATUS.DEGRADED
            elif status == "timedout":
                self.status = SCRIPT_STATUS.TIMEDOUT
            elif status == "skipped":
                self.status = SCRIPT_STATUS.SKIPPED
            # Propagate a reported link state to the interface this
            # result is associated with, if any.
            link_connected = parsed_yaml.get("link_connected")
            if self.interface and isinstance(link_connected, bool):
                self.interface.link_connected = link_connected
                self.interface.save(update_fields=["link_connected"])
    if self.script:
        if script_version_id is not None:
            # Look the reported version up among this script's previous
            # versions; an unknown id is logged but not fatal.
            for script in self.script.script.previous_versions():
                if script.id == script_version_id:
                    self.script_version = script
                    break
            if self.script_version is None:
                err_msg = (
                    "%s(%s) sent a script result for %s(%d) with an "
                    "unknown script version(%d)." % (
                        self.script_set.node.fqdn,
                        self.script_set.node.system_id,
                        self.script.name,
                        self.script.id,
                        script_version_id,
                    ))
                logger.error(err_msg)
                Event.objects.create_node_event(
                    system_id=self.script_set.node.system_id,
                    event_type=EVENT_TYPES.SCRIPT_RESULT_ERROR,
                    event_description=err_msg,
                )
        else:
            # If no script version was given assume the latest version
            # was run.
            self.script_version = self.script.script
    # If commissioning result check if its a builtin script, if so run its
    # hook before committing to the database.
    if (self.script_set.result_type == RESULT_TYPE.COMMISSIONING
            and self.name in NODE_INFO_SCRIPTS
            and stdout is not None):
        post_process_hook = NODE_INFO_SCRIPTS[self.name]["hook"]
        err = ("%s(%s): commissioning script '%s' failed during "
               "post-processing." % (
                   self.script_set.node.fqdn,
                   self.script_set.node.system_id,
                   self.name,
               ))
        # Circular imports.
        from metadataserver.api import try_or_log_event
        signal_status = try_or_log_event(
            self.script_set.node, None, err, post_process_hook,
            node=self.script_set.node, output=self.stdout,
            exit_status=self.exit_status,
        )
        # If the script failed to process mark the script as failed to
        # prevent testing from running and help users identify where
        # the error came from. This can happen when a commissioning
        # script generated invalid output.
        if signal_status is not None:
            self.status = SCRIPT_STATUS.FAILED
    # A passing commissioning script flagged for recommission resets the
    # builtin info scripts in this set back to PENDING so they run again.
    if (self.status == SCRIPT_STATUS.PASSED and self.script
            and self.script.script_type == SCRIPT_TYPE.COMMISSIONING
            and self.script.recommission):
        self.script_set.scriptresult_set.filter(
            script_name__in=NODE_INFO_SCRIPTS).update(
                status=SCRIPT_STATUS.PENDING,
                started=None,
                ended=None,
                updated=now(),
            )
    self.save()
def test_get_default_returns_Bin_from_bytes(self):
    """get_default() wraps a raw bytes default in `Bin`."""
    raw_default = b"wotcha"
    field = BinaryField(null=True)
    self.patch(field, "default", raw_default)
    default_value = field.get_default()
    self.assertEqual(Bin(raw_default), default_value)
def test_looks_up_data(self):
    """A stored row can be queried by its binary data value."""
    payload = b"Binary item"
    stored = BinaryFieldModel(data=Bin(payload))
    stored.save()
    fetched = BinaryFieldModel.objects.get(data=Bin(payload))
    self.assertEqual(stored, fetched)
def test_returns_bytes_not_text(self):
    """Round-tripped binary data comes back as bytes, never str."""
    saved = BinaryFieldModel(data=Bin(b"Data"))
    saved.save()
    reloaded = BinaryFieldModel.objects.get(id=saved.id)
    self.assertIsInstance(reloaded.data, bytes)
def test_stores_and_retrieves_empty_data(self):
    """Empty bytes survive a save/load round trip unchanged."""
    item = BinaryFieldModel(data=Bin(b''))
    self.assertEqual(b'', item.data)
    item.save()
    reloaded = BinaryFieldModel.objects.get(id=item.id)
    self.assertEqual(b'', reloaded.data)
def test_is_basically_bytes(self):
    """`Bin` compares equal to the plain bytes it wraps."""
    wrapped = Bin(b"Hello")
    self.assertEqual(b"Hello", wrapped)
def test_scripts_may_be_binary(self):
    """Non-text script content is stored and read back intact."""
    script_name = make_script_name()
    CommissioningScript.objects.create(
        name=script_name, content=Bin(sample_binary_data))
    fetched = CommissioningScript.objects.get(name=script_name)
    self.assertEqual(sample_binary_data, fetched.content)
def store_result(
        self, exit_status=None, output=None, stdout=None, stderr=None,
        result=None, script_version_id=None, timedout=False):
    """Store uploaded script output and derive this result's status.

    :param exit_status: The script's exit code; 0 marks it PASSED,
        non-zero FAILED.
    :param output: Combined output, stored verbatim as binary.
    :param stdout: Standard output, stored verbatim as binary.
    :param stderr: Standard error, stored verbatim as binary.
    :param result: Result data, stored as-is (not wrapped in Bin here).
    :param script_version_id: Id of the script version that ran; the
        latest version is assumed when not given.
    :param timedout: When True the status is forced to TIMEDOUT and
        exit_status is ignored.
    """
    # Don't allow ScriptResults to be overwritten unless the node is a
    # controller. Controllers are allowed to overwrite their results to
    # prevent new ScriptSets being created everytime a controller starts.
    # This also allows us to avoid creating an RPC call for the rack
    # controller to create a new ScriptSet.
    if not self.script_set.node.is_controller:
        # Allow both PENDING and RUNNING scripts incase the node didn't
        # inform MAAS the Script was being run, it just uploaded results.
        assert self.status in (
            SCRIPT_STATUS.PENDING, SCRIPT_STATUS.RUNNING)
        assert self.output == b''
        assert self.stdout == b''
        assert self.stderr == b''
        # NOTE(review): compared against str '' while the others use
        # b'' — presumably `result` is a text field in this version;
        # confirm against the model definition.
        assert self.result == ''
        assert self.script_version is None
    if timedout:
        self.status = SCRIPT_STATUS.TIMEDOUT
    elif exit_status is not None:
        self.exit_status = exit_status
        if exit_status == 0:
            self.status = SCRIPT_STATUS.PASSED
        else:
            self.status = SCRIPT_STATUS.FAILED
    if output is not None:
        self.output = Bin(output)
    if stdout is not None:
        self.stdout = Bin(stdout)
    if stderr is not None:
        self.stderr = Bin(stderr)
    if result is not None:
        self.result = result
    if self.script:
        if script_version_id is not None:
            # Look the reported version up among this script's previous
            # versions; an unknown id is logged but not fatal.
            for script in self.script.script.previous_versions():
                if script.id == script_version_id:
                    self.script_version = script
                    break
            if self.script_version is None:
                err_msg = (
                    "%s(%s) sent a script result for %s(%d) with an "
                    "unknown script version(%d)." % (
                        self.script_set.node.fqdn,
                        self.script_set.node.system_id,
                        self.script.name, self.script.id,
                        script_version_id))
                logger.error(err_msg)
                Event.objects.create_node_event(
                    system_id=self.script_set.node.system_id,
                    event_type=EVENT_TYPES.SCRIPT_RESULT_ERROR,
                    event_description=err_msg)
        else:
            # If no script version was given assume the latest version
            # was run.
            self.script_version = self.script.script
    # If commissioning result check if its a builtin script, if so run its
    # hook before committing to the database.
    if (self.script_set.result_type == RESULT_TYPE.COMMISSIONING and
            self.name in NODE_INFO_SCRIPTS):
        post_process_hook = NODE_INFO_SCRIPTS[self.name]['hook']
        post_process_hook(
            node=self.script_set.node, output=self.stdout,
            exit_status=self.exit_status)
    self.save()
def save(self, *args, **kwargs):
    """Persist the uploaded file as a new `CommissioningScript`.

    The script is named after the uploaded file and its contents are
    stored in binary form.
    """
    uploaded = self.cleaned_data['content']
    CommissioningScript.objects.create(
        name=uploaded.name,
        content=Bin(uploaded.read()))
def forwards(self, orm):
    """Copy each NodeCommissionResult's text data into the binary column."""
    # Imported here: migrations run in their own context, so the field
    # helper is pulled in lazily.
    from metadataserver.fields import Bin
    results = orm.NodeCommissionResult.objects.all()
    for result in results:
        result.data_bin = Bin(result.data.encode("utf-8"))
        result.save()
def set_lldp_details(self, node, data):
    """Record raw LLDP output for `node` as a commissioning result."""
    # script_result=0 records the capture as a success.
    NodeCommissionResult.objects.store_data(
        node,
        commissioningscript.LLDP_OUTPUT_NAME,
        script_result=0,
        data=Bin(data),
    )