Example #1
def find_nodegroup(request):
    """Find the nodegroup whose subnet contains the IP Address of the
    originating host of the request..

    The matching nodegroup may have multiple interfaces on the subnet,
    but there can be only one matching nodegroup.
    """
    # Circular imports.
    from maasserver.models import NodeGroup
    ip_address = request.META['REMOTE_ADDR']
    if ip_address is not None:
        management_statuses = (
            NODEGROUPINTERFACE_MANAGEMENT.DHCP,
            NODEGROUPINTERFACE_MANAGEMENT.DHCP_AND_DNS,
        )
        query = NodeGroup.objects.raw("""
            SELECT *
            FROM maasserver_nodegroup
            WHERE id IN (
                SELECT nodegroup_id
                FROM maasserver_nodegroupinterface
                WHERE (inet %s & subnet_mask) = (ip & subnet_mask)
                AND management IN %s
            )
            """, [
                ip_address,
                management_statuses,
                ]
            )
        return get_one(query)
    return None
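The SQL predicate (inet %s & subnet_mask) = (ip & subnet_mask) above selects interfaces whose subnet contains the requesting address. A minimal stand-alone sketch of the same check in Python, using only the standard-library ipaddress module; the helper name is illustrative and not part of MAAS:

from ipaddress import IPv4Address, IPv4Network


def same_subnet(remote_addr, interface_ip, subnet_mask):
    # Build the network from the interface address and its netmask;
    # strict=False tolerates host bits being set in interface_ip.
    network = IPv4Network("%s/%s" % (interface_ip, subnet_mask), strict=False)
    return IPv4Address(remote_addr) in network


# 10.0.0.20 is on the same /24 as an interface at 10.0.0.1.
assert same_subnet("10.0.0.20", "10.0.0.1", "255.255.255.0")
assert not same_subnet("10.1.0.20", "10.0.0.1", "255.255.255.0")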
Example #2
 def test_get_one_does_not_trigger_database_counting(self):
     # Avoid typical performance pitfall of querying objects *and*
     # the number of objects.
     item = factory.getRandomString()
     sequence = FakeQueryResult(type(item), [item])
     sequence.__len__ = Mock(side_effect=Exception("len() was called"))
     self.assertEqual(item, get_one(sequence))
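This test, together with the small unit tests further down, pins down get_one's contract: it returns the single item of any sequence or iterable, returns None when the input is empty, and never calls len() on the argument (which would issue a separate COUNT query for a Django QuerySet). A minimal sketch consistent with those tests follows; the real implementation may differ, in particular in how it reports more than one match, which these snippets never exercise:

from itertools import islice


def get_one(items):
    """Return the single item in `items`, or None if it is empty.

    Iterates lazily instead of calling len(), so a Django QuerySet
    argument never triggers a separate COUNT query.
    """
    # Pull at most two items; that is enough to distinguish "none",
    # "one", and "more than one".
    matches = list(islice(items, 2))
    if not matches:
        return None
    if len(matches) > 1:
        # Assumption: the tests shown here never reach this branch.
        raise ValueError("Got more than one item; expected at most one.")
    return matches[0]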
Example #3
def get_persistent_error(component):
    """Return persistent error for `component`, or None."""
    err = get_one(ComponentError.objects.filter(component=component))
    if err is None:
        return None
    else:
        return err.error
Example #4
File: apikey.py Project: cloudbase/maas
    def handle(self, *args, **options):
        username = options.get('username', None)
        if username is None:
            raise CommandError("You must provide a username with --username.")

        generate = options.get('generate')
        key_to_delete = options.get('delete', None)
        if generate and key_to_delete is not None:
            raise CommandError("Specify one of --generate or --delete.")

        user = get_one(User.objects.filter(username=username))
        if user is None:
            raise CommandError("User does not exist.")

        if generate:
            # Generate a new api key.
            self._generate_token(user)
            return

        elif key_to_delete is not None:
            # Delete an existing api key.
            self._delete_token(user, key_to_delete)
            return

        else:
            # No mutating action requested, so just print existing keys.
            tokens = user.get_profile().get_authorisation_tokens()
            for token in tokens:
                self._print_token(token)
Example #5
File: tags.py Project: cloudbase/maas
def get_nodegroup_worker_client(nodegroup_uuid):
    """Get a MAASClient that can do work for this nodegroup."""
    nodegroup = get_one(NodeGroup.objects.filter(uuid=nodegroup_uuid))
    django_client = OAuthAuthenticatedClient(
        get_worker_user(), token=nodegroup.api_token)
    maas_client = MAASDjangoTestClient(django_client)
    return maas_client
Example #6
    def forwards(self, orm):

        # Mark the nodegroup worker user as active, or it won't be
        # authorized to use the API.
        user = get_one(orm['auth.User'].objects.filter(
            username='******'))
        if user is not None and not user.is_active:
            user.is_active = True
            user.save()
Example #7
 def test_new_creates_nodegroup_with_interface(self):
     name = factory.make_name('nodegroup')
     uuid = factory.getRandomUUID()
     ip = factory.getRandomIPAddress()
     nodegroup = NodeGroup.objects.new(name, uuid, ip)
     interface = get_one(nodegroup.nodegroupinterface_set.all())
     self.assertEqual(
         (name, uuid, ip),
         (nodegroup.name, nodegroup.uuid, interface.ip))
Example #8
 def test_adding_works(self):
     key_string = get_data('data/test_rsa0.pub')
     response = self.client.post(
         reverse('sshkeys_handler'),
         data=dict(op="new", key=key_string))
     self.assertEqual(httplib.CREATED, response.status_code)
     parsed_response = json.loads(response.content)
     self.assertEqual(key_string, parsed_response["key"])
     added_key = get_one(SSHKey.objects.filter(user=self.logged_in_user))
     self.assertEqual(key_string, added_key.key)
Example #9
    def test_store_data(self):
        node = factory.make_node()
        name = factory.getRandomString(255)
        data = factory.getRandomString(1024 * 1024)
        script_result = randint(0, 10)
        NodeCommissionResult.objects.store_data(
            node, name=name, script_result=script_result, data=data)

        self.assertAttributes(
            get_one(NodeCommissionResult.objects.filter(node=node)),
            dict(name=name, data=data))
Example #10
 def test_new_creates_nodegroup_with_given_dhcp_settings(self):
     name = factory.make_name('nodegroup')
     uuid = factory.make_name('uuid')
     dhcp_network, dhcp_settings = make_dhcp_settings()
     ip = factory.getRandomIPInNetwork(dhcp_network)
     nodegroup = NodeGroup.objects.new(name, uuid, ip, **dhcp_settings)
     nodegroup = reload_object(nodegroup)
     interface = get_one(nodegroup.nodegroupinterface_set.all())
     self.assertEqual(name, nodegroup.name)
     self.assertThat(
         interface, MatchesStructure.byEquality(**dhcp_settings))
Example #11
def get_db_state(instance, field_name):
    """Get the persisted state of a given field for a given model instance.

    :param instance: The model instance to consider.
    :type instance: :class:`django.db.models.Model`
    :param field_name: The name of the field to return.
    :type field_name: basestring
    """
    obj = get_one(instance.__class__.objects.filter(pk=instance.pk))
    if obj is None:
        return None
    else:
        return getattr(obj, field_name)
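A hedged usage sketch for get_db_state, in the spirit of the test snippets above; factory.make_node and the hostname field are assumed from the surrounding examples, and get_db_state itself is taken to be in scope:

# Hypothetical test: an unsaved in-memory change must not show up in the
# persisted state until save() is called.
node = factory.make_node()
node.hostname = "renamed-host"
assert get_db_state(node, "hostname") != "renamed-host"
node.save()
assert get_db_state(node, "hostname") == "renamed-host"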
Example #12
 def test_ensure_master_creates_minimal_interface(self):
     master = NodeGroup.objects.ensure_master()
     interface = get_one(master.nodegroupinterface_set.all())
     self.assertThat(
         interface,
         MatchesStructure.byEquality(
             ip='127.0.0.1',
             subnet_mask=None,
             broadcast_ip=None,
             router_ip=None,
             ip_range_low=None,
             ip_range_high=None,
         ))
Example #13
File: api.py Project: cloudbase/maas
def get_node_for_mac(mac):
    """Identify node being queried based on its MAC address.

    This form of access is a security hazard, and thus it is permitted only
    on development systems where ALLOW_UNSAFE_METADATA_ACCESS is enabled.
    """
    if not settings.ALLOW_UNSAFE_METADATA_ACCESS:
        raise PermissionDenied(
            "Unauthenticated metadata access is not allowed on this MAAS.")
    match = get_one(MACAddress.objects.filter(mac_address=mac))
    if match is None:
        raise MAASAPINotFound()
    return match.node
Example #14
    def test_apikey_gets_keys(self):
        stderr = BytesIO()
        out = BytesIO()
        stdout = getwriter("UTF-8")(out)
        user = factory.make_user()
        call_command(
            'apikey', username=user.username, stderr=stderr, stdout=stdout)
        self.assertEqual('', stderr.getvalue().strip())

        expected_token = get_one(
            user.get_profile().get_authorisation_tokens())
        expected_string = convert_tuple_to_string(
            get_creds_tuple(expected_token)) + '\n'
        self.assertEqual(expected_string, stdout.getvalue())
Example #15
    def test_apikey_deletes_key(self):
        stderr = BytesIO()
        stdout = BytesIO()
        user = factory.make_user()
        existing_token = get_one(
            user.get_profile().get_authorisation_tokens())
        token_string = convert_tuple_to_string(
            get_creds_tuple(existing_token))
        call_command(
            'apikey', username=user.username, delete=token_string,
            stderr=stderr, stdout=stdout)
        self.assertEqual('', stderr.getvalue().strip())

        keys_after = user.get_profile().get_authorisation_tokens()
        self.assertEqual(0, len(keys_after))
Example #16
def reload_object(model_object):
    """Reload `obj` from the database.

    Use this when a test needs to inspect changes to model objects made by
    the API.

    If the object has been deleted, this will return None.

    :param model_object: Model object to reload.
    :type model_object: Concrete `Model` subtype.
    :return: Freshly-loaded instance of `model_object`, or None.
    :rtype: Same as `model_object`.
    """
    model_class = model_object.__class__
    return get_one(model_class.objects.filter(id=model_object.id))
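A hedged usage sketch matching the stated purpose of reload_object (inspecting changes made through another code path); factory.make_node and the Node model are assumed from the other snippets here:

# Hypothetical: a change applied via a queryset update is only visible on a
# freshly loaded copy, not on the stale in-memory instance.
node = factory.make_node()
Node.objects.filter(id=node.id).update(hostname="changed-elsewhere")
assert node.hostname != "changed-elsewhere"               # in-memory copy is stale
assert reload_object(node).hostname == "changed-elsewhere"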
Example #17
    def test_createadmin_creates_admin(self):
        stderr = BytesIO()
        stdout = BytesIO()
        username = factory.getRandomString()
        password = factory.getRandomString()
        email = '*****@*****.**' % factory.getRandomString()
        call_command(
            'createadmin', username=username, password=password,
            email=email, stderr=stderr, stdout=stdout)
        user = get_one(User.objects.filter(username=username))

        self.assertEquals('', stderr.getvalue().strip())
        self.assertEquals('', stdout.getvalue().strip())
        self.assertTrue(user.check_password(password))
        self.assertTrue(user.is_superuser)
        self.assertEqual(email, user.email)
Example #18
    def test_api_key_rejects_deletion_of_nonexistent_key(self):
        stderr = BytesIO()
        user = factory.make_user()
        existing_token = get_one(
            user.get_profile().get_authorisation_tokens())
        token_string = convert_tuple_to_string(
            get_creds_tuple(existing_token))
        call_command(
            'apikey', username=user.username, delete=token_string,
            stderr=stderr)
        self.assertEqual('', stderr.getvalue().strip())

        # Delete it again. Check that there's a sensible rejection.
        error_text = assertCommandErrors(
            self, 'apikey', username=user.username, delete=token_string)
        self.assertIn(
            "No matching api key found", error_text)
Example #19
 def test_POST_new_associates_mac_addresses(self):
     # The API allows a Node to be created and associated with MAC
     # Addresses.
     architecture = factory.getRandomChoice(ARCHITECTURE_CHOICES)
     self.client.post(
         reverse('nodes_handler'),
         {
             'op': 'new',
             'hostname': 'diane',
             'architecture': architecture,
             'after_commissioning_action': (
                 NODE_AFTER_COMMISSIONING_ACTION.DEFAULT),
             'mac_addresses': ['aa:bb:cc:dd:ee:ff', '22:bb:cc:dd:ee:ff'],
         })
     diane = get_one(Node.objects.filter(hostname='diane'))
     self.assertItemsEqual(
         ['aa:bb:cc:dd:ee:ff', '22:bb:cc:dd:ee:ff'],
         [mac.mac_address for mac in diane.macaddress_set.all()])
Example #20
File: nodekey.py Project: cloudbase/maas
    def get_token_for_node(self, node):
        """Find node's OAuth token, or if it doesn't have one, create it.

        This implicitly grants cloud-init (running on the node) access to the
        metadata service.

        Barring exceptions, this will always hold:

            get_node_for_key(get_token_for_node(node).key) == node

        :param node: The node that needs an oauth token for access to the
            metadata service.
        :type node: Node
        :return: An OAuth token, belonging to the node-init user, but
            uniquely associated with this node.
        :rtype: piston.models.Token
        """
        nodekey = get_one(self.filter(node=node))
        if nodekey is None:
            return self._create_token(node)
        else:
            return nodekey.token
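The docstring's invariant implies the method is also idempotent: a second call for the same node must hand back the existing token rather than minting a new one. A hedged test sketch in the style of the other snippets here; the NodeKey model name is inferred from the file name above, and factory.make_node is assumed from the other examples:

def test_get_token_for_node_reuses_existing_token(self):
    # The second call must return the token created by the first call.
    node = factory.make_node()
    token = NodeKey.objects.get_token_for_node(node)
    self.assertEqual(token, NodeKey.objects.get_token_for_node(node))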
Example #21
 def test_handle_when_URL_is_repeated(self):
     # bin/maas-enlist (in the maas-enlist package) has a bug where the
     # path it uses is doubled up. This was not discovered previously
     # because the API URL patterns were not anchored (see bug 1131323).
     # For compatibility, MAAS will handle requests to obviously incorrect
     # paths. It does *not* redirect because (a) it's not clear that curl
     # (used by maas-enlist) supports HTTP 307 redirects, which are needed
     # to support redirecting POSTs, and (b) curl does not follow redirects
     # by default anyway.
     architecture = factory.getRandomChoice(ARCHITECTURE_CHOICES)
     response = self.client.post(
         '/api/1.0/nodes/MAAS/api/1.0/nodes/',
         {
             'op': 'new',
             'hostname': factory.getRandomString(),
             'architecture': architecture,
             'mac_addresses': ['aa:bb:cc:dd:ee:ff'],
         })
     self.assertEqual(httplib.OK, response.status_code)
     system_id = json.loads(response.content)['system_id']
     nodes = Node.objects.filter(system_id=system_id)
     self.assertIsNotNone(get_one(nodes))
Example #22
File: test_api.py Project: ocni-dtu/maas
 def test_import_ssh_keys_creates_keys_keysource_and_audit_event(self):
     protocol = random.choice(
         [KEYS_PROTOCOL_TYPE.LP, KEYS_PROTOCOL_TYPE.GH]
     )
     auth_id = factory.make_name("auth_id")
     ks = "%s:%s" % (protocol, auth_id)
     key_string = get_data("data/test_rsa0.pub")
     mock_get_protocol_keys = self.patch(
         keysource_module, "get_protocol_keys"
     )
     mock_get_protocol_keys.return_value = [key_string]
     response = self.client.post(
         reverse("sshkeys_handler"), data=dict(op="import", keysource=ks)
     )
     added_key = get_one(SSHKey.objects.filter(user=self.user))
     self.assertEqual(key_string, added_key.key)
     self.assertEqual(ks, str(added_key.keysource))
     self.assertEqual(http.client.OK, response.status_code, response)
     self.assertThat(
         mock_get_protocol_keys, MockCalledOnceWith(protocol, auth_id)
     )
     event = Event.objects.get(type__level=AUDIT)
     self.assertIsNotNone(event)
     self.assertEqual(event.description, "Imported SSH keys.")
Example #23
 def _get_resource(self, rtype, name, architecture, subarchitecture):
     """Return `BootResource` with given rtype, name, architecture, and
     subarchitecture."""
     arch = "%s/%s" % (architecture, subarchitecture)
     return get_one(self.filter(rtype=rtype, name=name, architecture=arch))
Example #24
    def update_nodes(self, request, name):
        """@description-title Add or remove nodes by tag
        @description Add or remove nodes associated with the given tag.
        Note that you must supply either the ``add`` or ``remove``
        parameter.

        @param (url-string) "{name}" [required=true] A tag name.
        @param-example "{name}" virtual

        @param (string) "add" [required=false] The system_id to tag.
        @param-example "add" ``fptcnd``

        @param (string) "remove" [required=false] The system_id to untag.
        @param-example "remove" ``xbpf3n``

        @param (string) "definition" [required=false] If given, the
        definition (XPATH expression) will be validated against the
        current definition of the tag. If the value does not match, MAAS
        assumes the worker is out of date and will drop the update.
        @param-example "definition"
            //node[@id="display"]/'clock units="Hz"' > 1000000000

        @param (string) "rack_controller" [required=false] The system ID
        of the rack controller that processed the given tag initially.
        If not given, the requester must be a MAAS admin. If given,
        the requester must be the rack controller.

        @success (json) "success-json" A JSON object representing the
            updated node.
        @success-example "success-json" [exkey=update-nodes-tag] placeholder

        @error (http-status-code) "403" 403
        @error (content) "no-perms" The user does not have the permissions
        required to update the nodes.
        @error-example "no-perms"
            Must be a superuser or supply a rack_controller.

        @error (http-status-code) "409" 409
        @error (content) "no-def-match" The supplied definition doesn't match
        the current definition.
        @error-example "no-def-match"
            Definition supplied 'foobar' doesn't match current definition ''

        @error (http-status-code) "404" 404
        @error (content) "not-found" The requested tag name is not found.
        @error-example "not-found"
            Not Found
        """
        tag = Tag.objects.get_tag_or_404(name=name, user=request.user)
        rack_controller = None
        if not request.user.is_superuser:
            system_id = request.data.get("rack_controller", None)
            if system_id is None:
                raise PermissionDenied(
                    "Must be a superuser or supply a rack_controller")
            rack_controller = get_one(
                RackController.objects.filter(system_id=system_id))
            check_rack_controller_access(request, rack_controller)
        definition = request.data.get("definition", None)
        if definition is not None and tag.definition != definition:
            return HttpResponse(
                "Definition supplied '%s' "
                "doesn't match current definition '%s'" %
                (definition, tag.definition),
                content_type=("text/plain; charset=%s" %
                              settings.DEFAULT_CHARSET),
                status=int(http.client.CONFLICT),
            )
        nodes_to_add = self._get_nodes_for(request, "add")
        tag.node_set.add(*nodes_to_add)
        nodes_to_remove = self._get_nodes_for(request, "remove")
        tag.node_set.remove(*nodes_to_remove)
        return {
            "added": nodes_to_add.count(),
            "removed": nodes_to_remove.count(),
        }
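A hedged invocation sketch in the style of the API tests above. The URL name 'tag_handler' and the op value 'update_nodes' are assumptions made for illustration, and rack is assumed to be a registered rack controller; only the parameter names and example system_id come from the docstring:

# Hypothetical request from a rack controller tagging one machine.
response = self.client.post(
    reverse('tag_handler', args=['virtual']),
    {
        'op': 'update_nodes',
        'add': 'fptcnd',
        'rack_controller': rack.system_id,
    })
self.assertEqual(http.client.OK, response.status_code)
parsed = json.loads(response.content)
self.assertEqual(1, parsed['added'])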
Example #25
 def test_get_one_returns_None_for_empty_list(self):
     self.assertIsNone(get_one([]))
Example #26
 def get_file(self, sha256):
     """Return file based on SHA256 value."""
     return get_one(self.filter(sha256=sha256))
Example #27
 def test_get_one_returns_None_from_any_empty_sequence(self):
     self.assertIsNone(get_one("no item" for counter in range(0)))
Example #28
 def test_get_one_returns_single_list_item(self):
     item = factory.getRandomString()
     self.assertEqual(item, get_one([item]))
Example #29
 def test_get_one_returns_item_from_any_sequence_of_length_one(self):
     item = factory.getRandomString()
     self.assertEqual(item, get_one(item for counter in range(1)))
Example #30
 def test_get_one_returns_item_from_any_sequence_of_length_one(self):
     item = factory.getRandomString()
     self.assertEqual(item, get_one(item for counter in range(1)))
Example #31
 def get_by_filename(self, filename):
     return get_one(Template.objects.filter(filename=filename))
Example #32
 def test_get_one_returns_None_for_empty_list(self):
     self.assertIsNone(get_one([]))
Example #33
 def test_get_one_returns_single_list_item(self):
     item = factory.getRandomString()
     self.assertEqual(item, get_one([item]))
Example #34
 def test_delete(self):
     user = factory.make_User()
     token = create_auth_token(user)
     handler = TokenHandler(user, {}, None)
     handler.delete({"id": token.id})
     self.assertIsNone(get_one(get_auth_tokens(user).filter(id=token.id)))
Example #35
 def test_get_one_returns_None_from_any_empty_sequence(self):
     self.assertIsNone(get_one("no item" for counter in range(0)))
Example #36
def populate_main():
    """Populate the main data all in one transaction."""
    admin = factory.make_admin(username="******",
                               password="******",
                               completed_intro=False)  # noqa
    user1, _ = factory.make_user_with_keys(username="******",
                                           password="******",
                                           completed_intro=False)
    user2, _ = factory.make_user_with_keys(username="******",
                                           password="******",
                                           completed_intro=False)

    # Physical zones.
    zones = [
        factory.make_Zone(name="zone-north"),
        factory.make_Zone(name="zone-south"),
    ]

    # DNS domains.
    domains = [
        Domain.objects.get_default_domain(),
        factory.make_Domain("sample"),
        factory.make_Domain("ubnt"),
    ]

    # Create the fabrics that will be used by the regions, racks,
    # machines, and devices.
    fabric0 = Fabric.objects.get_default_fabric()
    fabric0_untagged = fabric0.get_default_vlan()
    fabric0_vlan10 = factory.make_VLAN(fabric=fabric0, vid=10)
    fabric1 = factory.make_Fabric()
    fabric1_untagged = fabric1.get_default_vlan()
    fabric1_vlan42 = factory.make_VLAN(fabric=fabric1, vid=42)
    empty_fabric = factory.make_Fabric()  # noqa

    # Create some spaces.
    space_mgmt = factory.make_Space("management")
    space_storage = factory.make_Space("storage")
    space_internal = factory.make_Space("internal")
    space_ipv6_testbed = factory.make_Space("ipv6-testbed")

    # Subnets used by regions, racks, machines, and devices.
    subnet_1 = factory.make_Subnet(
        cidr="172.16.1.0/24",
        gateway_ip="172.16.1.1",
        vlan=fabric0_untagged,
        space=space_mgmt,
    )
    subnet_2 = factory.make_Subnet(
        cidr="172.16.2.0/24",
        gateway_ip="172.16.2.1",
        vlan=fabric1_untagged,
        space=space_mgmt,
    )
    subnet_3 = factory.make_Subnet(
        cidr="172.16.3.0/24",
        gateway_ip="172.16.3.1",
        vlan=fabric0_vlan10,
        space=space_storage,
    )
    subnet_4 = factory.make_Subnet(  # noqa
        cidr="172.16.4.0/24",
        gateway_ip="172.16.4.1",
        vlan=fabric0_vlan10,
        space=space_internal,
    )
    subnet_2001_db8_42 = factory.make_Subnet(  # noqa
        cidr="2001:db8:42::/64",
        gateway_ip="",
        vlan=fabric1_vlan42,
        space=space_ipv6_testbed,
    )
    ipv4_subnets = [subnet_1, subnet_2, subnet_3, subnet_4]

    # Static routes on subnets.
    factory.make_StaticRoute(source=subnet_1, destination=subnet_2)
    factory.make_StaticRoute(source=subnet_1, destination=subnet_3)
    factory.make_StaticRoute(source=subnet_1, destination=subnet_4)
    factory.make_StaticRoute(source=subnet_2, destination=subnet_1)
    factory.make_StaticRoute(source=subnet_2, destination=subnet_3)
    factory.make_StaticRoute(source=subnet_2, destination=subnet_4)
    factory.make_StaticRoute(source=subnet_3, destination=subnet_1)
    factory.make_StaticRoute(source=subnet_3, destination=subnet_2)
    factory.make_StaticRoute(source=subnet_3, destination=subnet_4)
    factory.make_StaticRoute(source=subnet_4, destination=subnet_1)
    factory.make_StaticRoute(source=subnet_4, destination=subnet_2)
    factory.make_StaticRoute(source=subnet_4, destination=subnet_3)

    # Load builtin scripts in the database so we can generate fake results
    # below.
    load_builtin_scripts()

    hostname = gethostname()
    region_rack = get_one(
        Node.objects.filter(node_type=NODE_TYPE.REGION_AND_RACK_CONTROLLER,
                            hostname=hostname))
    # If "make run" executes before "make sampledata", the rack may have
    # already registered.
    if region_rack is None:
        region_rack = factory.make_Node(
            node_type=NODE_TYPE.REGION_AND_RACK_CONTROLLER,
            hostname=hostname,
            interface=False,
        )

        # Get list of mac addresses that should be used for the region
        # rack controller. This will make sure the RegionAdvertisingService
        # picks the correct region on first start-up and doesn't get multiple.
        mac_addresses = get_mac_addresses()

        def get_next_mac():
            try:
                return mac_addresses.pop()
            except IndexError:
                return factory.make_mac_address()

        # Region and rack controller (hostname of dev machine)
        #   eth0     - fabric 0 - untagged
        #   eth1     - fabric 0 - untagged
        #   eth2     - fabric 1 - untagged - 172.16.2.2/24 - static
        #   bond0    - fabric 0 - untagged - 172.16.1.2/24 - static
        #   bond0.10 - fabric 0 - 10       - 172.16.3.2/24 - static
        eth0 = factory.make_Interface(
            INTERFACE_TYPE.PHYSICAL,
            name="eth0",
            node=region_rack,
            vlan=fabric0_untagged,
            mac_address=get_next_mac(),
        )
        eth1 = factory.make_Interface(
            INTERFACE_TYPE.PHYSICAL,
            name="eth1",
            node=region_rack,
            vlan=fabric0_untagged,
            mac_address=get_next_mac(),
        )
        eth2 = factory.make_Interface(
            INTERFACE_TYPE.PHYSICAL,
            name="eth2",
            node=region_rack,
            vlan=fabric1_untagged,
            mac_address=get_next_mac(),
        )
        bond0 = factory.make_Interface(
            INTERFACE_TYPE.BOND,
            name="bond0",
            node=region_rack,
            vlan=fabric0_untagged,
            parents=[eth0, eth1],
            mac_address=eth0.mac_address,
        )
        bond0_10 = factory.make_Interface(
            INTERFACE_TYPE.VLAN,
            node=region_rack,
            vlan=fabric0_vlan10,
            parents=[bond0],
        )
        factory.make_StaticIPAddress(
            alloc_type=IPADDRESS_TYPE.STICKY,
            ip="172.16.1.2",
            subnet=subnet_1,
            interface=bond0,
        )
        factory.make_StaticIPAddress(
            alloc_type=IPADDRESS_TYPE.STICKY,
            ip="172.16.2.2",
            subnet=subnet_2,
            interface=eth2,
        )
        factory.make_StaticIPAddress(
            alloc_type=IPADDRESS_TYPE.STICKY,
            ip="172.16.3.2",
            subnet=subnet_3,
            interface=bond0_10,
        )
        fabric0_untagged.primary_rack = region_rack
        fabric0_untagged.save()
        fabric1_untagged.primary_rack = region_rack
        fabric1_untagged.save()
        fabric0_vlan10.primary_rack = region_rack
        fabric0_vlan10.save()

    # Rack controller (happy-rack)
    #   eth0     - fabric 0 - untagged
    #   eth1     - fabric 0 - untagged
    #   eth2     - fabric 1 - untagged - 172.16.2.3/24 - static
    #   bond0    - fabric 0 - untagged - 172.16.1.3/24 - static
    #   bond0.10 - fabric 0 - 10       - 172.16.3.3/24 - static
    rack = factory.make_Node(
        node_type=NODE_TYPE.RACK_CONTROLLER,
        hostname="happy-rack",
        interface=False,
    )
    eth0 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL,
                                  name="eth0",
                                  node=rack,
                                  vlan=fabric0_untagged)
    eth1 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL,
                                  name="eth1",
                                  node=rack,
                                  vlan=fabric0_untagged)
    eth2 = factory.make_Interface(INTERFACE_TYPE.PHYSICAL,
                                  name="eth2",
                                  node=rack,
                                  vlan=fabric1_untagged)
    bond0 = factory.make_Interface(
        INTERFACE_TYPE.BOND,
        name="bond0",
        node=rack,
        vlan=fabric0_untagged,
        parents=[eth0, eth1],
    )
    bond0_10 = factory.make_Interface(INTERFACE_TYPE.VLAN,
                                      node=rack,
                                      vlan=fabric0_vlan10,
                                      parents=[bond0])
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.1.3",
        subnet=subnet_1,
        interface=bond0,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.2.3",
        subnet=subnet_2,
        interface=eth2,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.3.3",
        subnet=subnet_3,
        interface=bond0_10,
    )
    fabric0_untagged.secondary_rack = rack
    fabric0_untagged.save()
    fabric1_untagged.secondary_rack = rack
    fabric1_untagged.save()
    fabric0_vlan10.secondary_rack = rack
    fabric0_vlan10.save()

    # Region controller (happy-region)
    #   eth0     - fabric 0 - untagged
    #   eth1     - fabric 0 - untagged
    #   eth2     - fabric 1 - untagged - 172.16.2.4/24 - static
    #   bond0    - fabric 0 - untagged - 172.16.1.4/24 - static
    #   bond0.10 - fabric 0 - 10       - 172.16.3.4/24 - static
    region = factory.make_Node(
        node_type=NODE_TYPE.REGION_CONTROLLER,
        hostname="happy-region",
        interface=False,
    )
    eth0 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL,
        name="eth0",
        node=region,
        vlan=fabric0_untagged,
    )
    eth1 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL,
        name="eth1",
        node=region,
        vlan=fabric0_untagged,
    )
    eth2 = factory.make_Interface(
        INTERFACE_TYPE.PHYSICAL,
        name="eth2",
        node=region,
        vlan=fabric1_untagged,
    )
    bond0 = factory.make_Interface(
        INTERFACE_TYPE.BOND,
        name="bond0",
        node=region,
        vlan=fabric0_untagged,
        parents=[eth0, eth1],
    )
    bond0_10 = factory.make_Interface(INTERFACE_TYPE.VLAN,
                                      node=region,
                                      vlan=fabric0_vlan10,
                                      parents=[bond0])
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.1.4",
        subnet=subnet_1,
        interface=bond0,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.2.4",
        subnet=subnet_2,
        interface=eth2,
    )
    factory.make_StaticIPAddress(
        alloc_type=IPADDRESS_TYPE.STICKY,
        ip="172.16.3.4",
        subnet=subnet_3,
        interface=bond0_10,
    )

    # Create one machine for every status. Each machine has a random interface
    # and storage configuration.
    node_statuses = [
        status for status in map_enum(NODE_STATUS).items() if status not in
        [NODE_STATUS.MISSING, NODE_STATUS.RESERVED, NODE_STATUS.RETIRED]
    ]
    machines = []
    test_scripts = [
        script.name
        for script in Script.objects.filter(script_type=SCRIPT_TYPE.TESTING)
    ]
    for _, status in node_statuses:
        owner = None
        if status in ALLOCATED_NODE_STATUSES:
            owner = random.choice([admin, user1, user2])
        elif status in [
                NODE_STATUS.COMMISSIONING,
                NODE_STATUS.FAILED_RELEASING,
        ]:
            owner = admin

        machine = factory.make_Node(
            status=status,
            owner=owner,
            zone=random.choice(zones),
            interface=False,
            with_boot_disk=False,
            power_type="manual",
            domain=random.choice(domains),
            memory=random.choice([1024, 4096, 8192]),
            description=random.choice([
                "",
                "Scheduled for removeal",
                "Firmware old",
                "Earmarked for Project Fuse in April",
            ]),
            cpu_count=random.randint(2, 8),
        )
        machine.set_random_hostname()
        machines.append(machine)

        # Create random network configuration.
        RandomInterfaceFactory.create_random(machine)

        # Add random storage devices and set a random layout.
        for _ in range(random.randint(1, 5)):
            factory.make_PhysicalBlockDevice(
                node=machine,
                size=random.randint(LARGE_BLOCK_DEVICE,
                                    LARGE_BLOCK_DEVICE * 10),
            )
        if status in [
                NODE_STATUS.READY,
                NODE_STATUS.ALLOCATED,
                NODE_STATUS.DEPLOYING,
                NODE_STATUS.DEPLOYED,
                NODE_STATUS.FAILED_DEPLOYMENT,
                NODE_STATUS.RELEASING,
                NODE_STATUS.FAILED_RELEASING,
        ]:
            machine.set_storage_layout(
                random.choice([
                    layout for layout in STORAGE_LAYOUTS.keys()
                    if layout != "vmfs6"
                ]))
            if status != NODE_STATUS.READY:
                machine._create_acquired_filesystems()

        # Add a random number of events.
        for _ in range(random.randint(25, 100)):
            factory.make_Event(node=machine)

        # Add in commissioning and testing results.
        if status != NODE_STATUS.NEW:
            for _ in range(0, random.randint(1, 10)):
                css = ScriptSet.objects.create_commissioning_script_set(
                    machine)
                scripts = set()
                for __ in range(1, len(test_scripts)):
                    scripts.add(random.choice(test_scripts))
                tss = ScriptSet.objects.create_testing_script_set(
                    machine, list(scripts))
            machine.current_commissioning_script_set = css
            machine.current_testing_script_set = tss
            machine.save()

        # Fill in historical results
        for script_set in machine.scriptset_set.all():
            if script_set in [css, tss]:
                continue
            for script_result in script_set:
                # Can't use script_result.store_result as it will try to
                # process the result and fail on the fake data.
                script_result.exit_status = random.randint(0, 255)
                if script_result.exit_status == 0:
                    script_result.status = SCRIPT_STATUS.PASSED
                else:
                    script_result.status = random.choice(
                        list(SCRIPT_STATUS_FAILED))
                script_result.started = factory.make_date()
                script_result.ended = script_result.started + timedelta(
                    seconds=random.randint(0, 10000))
                script_result.stdout = Bin(
                    factory.make_string().encode("utf-8"))
                script_result.stderr = Bin(
                    factory.make_string().encode("utf-8"))
                script_result.output = Bin(
                    factory.make_string().encode("utf-8"))
                script_result.save()

        # Only add in results in states where commissioning should be completed.
        if status not in [NODE_STATUS.NEW, NODE_STATUS.COMMISSIONING]:
            if status == NODE_STATUS.FAILED_COMMISSIONING:
                exit_status = random.randint(1, 255)
                script_status = random.choice(list(SCRIPT_STATUS_FAILED))
            else:
                exit_status = 0
                script_status = SCRIPT_STATUS.PASSED
            for script_result in css:
                # Can't use script_result.store_result as it will try to
                # process the result and fail on the fake data.
                script_result.status = script_status
                script_result.exit_status = exit_status
                script_result.started = factory.make_date()
                script_result.ended = script_result.started + timedelta(
                    seconds=random.randint(0, 10000))
                script_result.stdout = Bin(
                    factory.make_string().encode("utf-8"))
                script_result.stderr = Bin(
                    factory.make_string().encode("utf-8"))
                script_result.output = Bin(
                    factory.make_string().encode("utf-8"))
                script_result.save()
        elif status == NODE_STATUS.COMMISSIONING:
            for script_result in css:
                script_result.status = random.choice(
                    list(SCRIPT_STATUS_RUNNING_OR_PENDING))
                if script_result.status != SCRIPT_STATUS.PENDING:
                    script_result.started = factory.make_date()
                script_result.save()

        # Only add in results in states where testing should be completed.
        if status not in [NODE_STATUS.NEW, NODE_STATUS.TESTING]:
            if status == NODE_STATUS.FAILED_TESTING:
                exit_status = random.randint(1, 255)
                script_status = random.choice(list(SCRIPT_STATUS_FAILED))
            else:
                exit_status = 0
                script_status = SCRIPT_STATUS.PASSED
            for script_result in tss:
                # Can't use script_result.store_result as it will try to
                # process the result and fail on the fake data.
                script_result.status = script_status
                script_result.exit_status = exit_status
                script_result.started = factory.make_date()
                script_result.ended = script_result.started + timedelta(
                    seconds=random.randint(0, 10000))
                script_result.stdout = Bin(
                    factory.make_string().encode("utf-8"))
                script_result.stderr = Bin(
                    factory.make_string().encode("utf-8"))
                script_result.output = Bin(
                    factory.make_string().encode("utf-8"))
                script_result.save()
        elif status == NODE_STATUS.TESTING:
            for script_result in tss:
                script_result.status = random.choice(
                    list(SCRIPT_STATUS_RUNNING_OR_PENDING))
                if script_result.status != SCRIPT_STATUS.PENDING:
                    script_result.started = factory.make_date()
                script_result.save()

        # Add installation results.
        if status in [
                NODE_STATUS.DEPLOYING,
                NODE_STATUS.DEPLOYED,
                NODE_STATUS.FAILED_DEPLOYMENT,
        ]:
            script_set = ScriptSet.objects.create_installation_script_set(
                machine)
            machine.current_installation_script_set = script_set
            machine.save()

        if status == NODE_STATUS.DEPLOYED:
            for script_result in machine.current_installation_script_set:
                stdout = factory.make_string().encode("utf-8")
                script_result.store_result(0, stdout)
        elif status == NODE_STATUS.FAILED_DEPLOYMENT:
            for script_result in machine.current_installation_script_set:
                exit_status = random.randint(1, 255)
                stdout = factory.make_string().encode("utf-8")
                stderr = factory.make_string().encode("utf-8")
                script_result.store_result(exit_status, stdout, stderr)

        # Add children devices to the deployed machine.
        if status == NODE_STATUS.DEPLOYED:
            boot_interface = machine.get_boot_interface()
            for _ in range(5):
                device = factory.make_Device(
                    interface=True,
                    domain=machine.domain,
                    parent=machine,
                    vlan=boot_interface.vlan,
                )
                device.set_random_hostname()
                RandomInterfaceFactory.assign_ip(
                    device.get_boot_interface(),
                    alloc_type=IPADDRESS_TYPE.STICKY,
                )

    # Create a few pods and assign a random set of the machines to them.
    pods = [None]
    pod_storage_pools = defaultdict(list)
    machines_in_pods = defaultdict(list)
    for _ in range(3):
        subnet = random.choice(ipv4_subnets)
        ip = factory.pick_ip_in_Subnet(subnet)
        ip_address = factory.make_StaticIPAddress(
            alloc_type=IPADDRESS_TYPE.STICKY, ip=ip, subnet=subnet)
        power_address = "qemu+ssh://ubuntu@%s/system" % ip
        pod = factory.make_Pod(
            pod_type="virsh",
            parameters={"power_address": power_address},
            ip_address=ip_address,
            capabilities=[
                Capabilities.DYNAMIC_LOCAL_STORAGE,
                Capabilities.COMPOSABLE,
            ],
        )
        for _ in range(3):
            pool = factory.make_PodStoragePool(pod)
            pod_storage_pools[pod].append(pool)
        pod.default_storage_pool = pool
        pod.save()
        pods.append(pod)
    for _ in range(3):
        subnet = random.choice(ipv4_subnets)
        ip = factory.pick_ip_in_Subnet(subnet)
        ip_address = factory.make_StaticIPAddress(
            alloc_type=IPADDRESS_TYPE.STICKY, ip=ip, subnet=subnet)
        power_address = "%s" % ip
        pod = factory.make_Pod(
            pod_type="rsd",
            parameters={
                "power_address": power_address,
                "power_user": "******",
                "power_pass": "******",
            },
            ip_address=ip_address,
            capabilities=[
                Capabilities.DYNAMIC_LOCAL_STORAGE,
                Capabilities.COMPOSABLE,
            ],
        )
        for _ in range(3):
            pool = factory.make_PodStoragePool(pod)
            pod_storage_pools[pod].append(pool)
        pod.default_storage_pool = pool
        pod.save()
        pods.append(pod)
    for machine in machines:
        # Add the machine to the pod if it's its lucky day!
        pod = random.choice(pods)
        if pod is not None:
            machine.bmc = pod
            machine.instance_power_parameters = {"power_id": machine.hostname}
            machine.save()
            machines_in_pods[pod].append(machine)

            # Assign the block devices on the machine to a storage pool.
            for block_device in machine.physicalblockdevice_set.all():
                block_device.storage_pool = random.choice(
                    pod_storage_pools[pod])
                block_device.save()

    # Update the pod attributes so that each has more available than used.
    for pod in pods[1:]:
        pod.cores = pod.get_used_cores() + random.randint(4, 8)
        pod.memory = pod.get_used_memory() + random.choice(
            [1024, 2048, 4096, 4096 * 4, 4096 * 8])
        pod.local_storage = sum(pool.storage
                                for pool in pod_storage_pools[pod])
        pod.save()

    # Create a few devices.
    for _ in range(10):
        device = factory.make_Device(interface=True)
        device.set_random_hostname()

    # Add some DHCP snippets.
    # - Global
    factory.make_DHCPSnippet(
        name="foo class",
        description="adds class for vender 'foo'",
        value=VersionedTextFile.objects.create(data=dedent("""\
            class "foo" {
                match if substring (
                    option vendor-class-identifier, 0, 3) = "foo";
            }
        """)),
    )
    factory.make_DHCPSnippet(
        name="bar class",
        description="adds class for vender 'bar'",
        value=VersionedTextFile.objects.create(data=dedent("""\
            class "bar" {
                match if substring (
                    option vendor-class-identifier, 0, 3) = "bar";
            }
        """)),
        enabled=False,
    )
    # - Subnet
    factory.make_DHCPSnippet(
        name="600 lease time",
        description="changes lease time to 600 secs.",
        value=VersionedTextFile.objects.create(data="default-lease-time 600;"),
        subnet=subnet_1,
    )
    factory.make_DHCPSnippet(
        name="7200 max lease time",
        description="changes max lease time to 7200 secs.",
        value=VersionedTextFile.objects.create(data="max-lease-time 7200;"),
        subnet=subnet_2,
        enabled=False,
    )
    # - Node
    factory.make_DHCPSnippet(
        name="boot from other server",
        description="instructs device to boot from other server",
        value=VersionedTextFile.objects.create(data=dedent("""\
            filename "test-boot";
            server-name "boot.from.me";
        """)),
        node=device,
    )

    # Add notifications for admins, users, and each individual user, and for
    # each notification category.
    factory.make_Notification(
        "Attention admins! Core critical! Meltdown imminent! Evacuate "
        "habitat immediately!",
        admins=True,
        category="error",
    )
    factory.make_Notification(
        "Dear users, rumours of a core meltdown are unfounded. Please "
        "return to your home-pods and places of business.",
        users=True,
        category="warning",
    )
    factory.make_Notification(
        "FREE! For the next 2 hours get FREE blueberry and iodine pellets "
        "at the nutri-dispensers.",
        users=True,
        category="success",
    )
    for user in User.objects.all():
        context = {"name": user.username.capitalize()}
        factory.make_Notification(
            "Greetings, {name}! Get away from the habitat for the weekend and "
            "visit the Mare Nubium with MAAS Tours. Use the code METAL to "
            "claim a special gift!",
            user=user,
            context=context,
            category="info",
        )
Example #37
 def test_delete(self):
     user = factory.make_User()
     sshkey = factory.make_SSHKey(user=user)
     handler = SSHKeyHandler(user, {}, None)
     handler.delete({"id": sshkey.id})
     self.assertIsNone(get_one(SSHKey.objects.filter(id=sshkey.id)))
Example #38
File: hooks.py Project: pfxuan/maas
def retag_node_for_hardware_by_modalias(
        node, modaliases, parent_tag_name, hardware_descriptors):
    """Adds or removes tags on a node based on its modaliases.

    Returns the Tag model objects added and removed, respectively.

    :param node: The node whose tags to modify.
    :param modaliases: The modaliases discovered on the node.
    :param parent_tag_name: The tag name for the hardware type given in the
        `hardware_descriptors` list. For example, if switch ASICs are being
        discovered, the string "switch" might be appropriate. Then, if switch
        hardware is found, the node will be tagged with the matching
        descriptors' tag(s), *and* with the more general "switch" tag.
    :param hardware_descriptors: A list of hardware descriptor dictionaries.

    :returns: tuple of (tags_added, tags_removed)
    """
    # Don't unconditionally create the tag. Check for it with a filter first.
    parent_tag = get_one(Tag.objects.filter(name=parent_tag_name))
    tags_added = set()
    tags_removed = set()
    discovered_hardware, ruled_out_hardware = determine_hardware_matches(
        modaliases, hardware_descriptors)
    if len(discovered_hardware) > 0:
        if parent_tag is None:
            # Create the tag "just in time" if we found matching hardware, and
            # we hadn't created the tag yet.
            parent_tag = Tag(name=parent_tag_name)
            parent_tag.save()
        node.tags.add(parent_tag)
        tags_added.add(parent_tag)
        logger.info(
            "%s: Added tag '%s' for detected hardware type." % (
                node.hostname, parent_tag_name))
        for descriptor in discovered_hardware:
            tag = descriptor['tag']
            comment = descriptor['comment']
            matches = descriptor['matches']
            hw_tag, _ = Tag.objects.get_or_create(name=tag, defaults={
                'comment': comment
            })
            node.tags.add(hw_tag)
            tags_added.add(hw_tag)
            logger.info(
                "%s: Added tag '%s' for detected hardware: %s "
                "(Matched: %s)." % (node.hostname, tag, comment, matches))
    else:
        if parent_tag is not None:
            node.tags.remove(parent_tag)
            tags_removed.add(parent_tag)
            logger.info(
                "%s: Removed tag '%s'; machine does not match hardware "
                "description." % (node.hostname, parent_tag_name))
    for descriptor in ruled_out_hardware:
        tag_name = descriptor['tag']
        existing_tag = get_one(node.tags.filter(name=tag_name))
        if existing_tag is not None:
            node.tags.remove(existing_tag)
            tags_removed.add(existing_tag)
            logger.info(
                "%s: Removed tag '%s'; hardware is missing." % (
                    node.hostname, tag_name))
    return tags_added, tags_removed
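A hedged example of a call, showing the descriptor shape as far as this snippet reveals it: 'tag' and 'comment' are read directly by the code above, while the key holding the modalias patterns ('modaliases' below) is an assumption about the input format; node and node_modaliases are assumed to be in scope, and the pattern string is illustrative:

# Hypothetical descriptor list and call.
switch_descriptors = [
    {
        'tag': 'example-switch-asic',
        'comment': 'Example switch ASIC',
        'modaliases': ['pci:v0000ABCDd0000EF01*'],
    },
]

tags_added, tags_removed = retag_node_for_hardware_by_modalias(
    node, node_modaliases, 'switch', switch_descriptors)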
Example #39
def get_uploaded_resource_with_name(resources, name):
    """Return the `BootResource` from `resources` that has the given `name`.
    """
    return get_one(resources.filter(name=name))