예제 #1
0
 def _test_get_node_vif_ids_two_ports(self, key):
     """Verify get_node_vif_ids() maps both ports when VIFs live under *key*."""
     vif_field = 'vif_port_id' if key == "extra" else 'tenant_vif_port_id'
     kwargs1 = {key: {vif_field: 'test-vif-A'}}
     kwargs2 = {key: {vif_field: 'test-vif-B'}}
     port1 = db_utils.create_test_port(
         node_id=self.node.id, address='aa:bb:cc:dd:ee:ff',
         uuid=uuidutils.generate_uuid(), driver='fake', **kwargs1)
     port2 = db_utils.create_test_port(
         node_id=self.node.id, address='dd:ee:ff:aa:bb:cc',
         uuid=uuidutils.generate_uuid(), driver='fake', **kwargs2)
     expected = {'portgroups': {},
                 'ports': {port1.uuid: 'test-vif-A',
                           port2.uuid: 'test-vif-B'}}
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual(expected, result)
예제 #2
0
    def test_get_node_vif_ids_one_portgroup(self):
        """A lone portgroup's VIF from 'extra' shows up under 'portgroups'."""
        portgroup = db_utils.create_test_portgroup(
            node_id=self.node.id,
            extra={"vif_port_id": "test-vif-A"})
        with task_manager.acquire(self.context, self.node.uuid) as task:
            result = network.get_node_vif_ids(task)
        self.assertEqual(
            {"portgroups": {portgroup.uuid: "test-vif-A"}, "ports": {}},
            result)
예제 #3
0
    def test_get_node_vif_ids_one_portgroup(self):
        """get_node_vif_ids() reports the single portgroup's VIF id."""
        extra = {'vif_port_id': 'test-vif-A'}
        pg = db_utils.create_test_portgroup(node_id=self.node.id, extra=extra)
        expected = {'portgroups': {pg.uuid: 'test-vif-A'}, 'ports': {}}
        with task_manager.acquire(self.context, self.node.uuid) as task:
            result = network.get_node_vif_ids(task)
        self.assertEqual(expected, result)
예제 #4
0
    def test_get_node_vif_ids_one_portgroup_int_info(self):
        """A VIF stored in internal_info is reported for a portgroup."""
        pg = db_utils.create_test_portgroup(
            node_id=self.node.id,
            internal_info={'tenant_vif_port_id': 'test-vif-A'})
        with task_manager.acquire(self.context, self.node.uuid) as task:
            result = network.get_node_vif_ids(task)
        expected = {'portgroups': {pg.uuid: 'test-vif-A'}, 'ports': {}}
        self.assertEqual(expected, result)
예제 #5
0
    def update_dhcp_opts(self, task, options, vifs=None):
        """Send or update the DHCP BOOT options for this node.

        :param task: A TaskManager instance.
        :param options: a list of DHCP option dicts, e.g.

                          ::

                           [{'opt_name': 'bootfile-name',
                             'opt_value': 'pxelinux.0'},
                            {'opt_name': 'server-ip-address',
                             'opt_value': '123.123.123.456'},
                            {'opt_name': 'tftp-server',
                             'opt_value': '123.123.123.123'}]
        :param vifs: a dict mapping Ironic port UUIDs to the Neutron port
            UUIDs whose DHCP options should be updated. When None, the
            mapping is looked up from the node's Ironic port objects.
        :raises: FailedToUpdateDHCPOptOnPort when the node has no VIFs, or
            when every single update attempt fails.
        """
        if vifs is None:
            vifs = network.get_node_vif_ids(task)
        if not vifs:
            raise exception.FailedToUpdateDHCPOptOnPort(
                _("No VIFs found for node %(node)s when attempting "
                  "to update DHCP BOOT options.") % {"node": task.node.uuid})

        # Try every port and remember the ones that could not be updated.
        failed_ports = []
        for ironic_port, neutron_port in vifs.items():
            try:
                self.update_port_dhcp_opts(
                    neutron_port, options, token=task.context.auth_token)
            except exception.FailedToUpdateDHCPOptOnPort:
                failed_ports.append(ironic_port)

        if failed_ports:
            if len(failed_ports) == len(vifs):
                # Nothing succeeded at all -- surface a hard failure.
                raise exception.FailedToUpdateDHCPOptOnPort(
                    _("Failed to set DHCP BOOT options for any port on "
                      "node %s.") % task.node.uuid)
            LOG.warning(
                _LW("Some errors were encountered when updating "
                    "the DHCP BOOT options for node %(node)s on "
                    "the following ports: %(ports)s."),
                {"node": task.node.uuid, "ports": failed_ports})

        # TODO(adam_g): Hack to workaround bug 1334447 until we have a
        # mechanism for synchronizing events with Neutron.  We need to sleep
        # only if we are booting VMs, which is implied by SSHPower, to ensure
        # they do not boot before Neutron agents have setup sufficient DHCP
        # config for netboot.
        if isinstance(task.driver.power, ssh.SSHPower):
            LOG.debug("Waiting 15 seconds for Neutron.")
            time.sleep(15)
예제 #6
0
    def update_dhcp_opts(self, task, options, vifs=None):
        """Send or update the DHCP BOOT options for this node.

        :param task: A TaskManager instance.
        :param options: a list of DHCP option dicts, e.g.

                          ::

                           [{'opt_name': 'bootfile-name',
                             'opt_value': 'pxelinux.0'},
                            {'opt_name': 'server-ip-address',
                             'opt_value': '123.123.123.456'},
                            {'opt_name': 'tftp-server',
                             'opt_value': '123.123.123.123'}]
        :param vifs: a dict mapping Ironic port UUIDs to Neutron port
            UUIDs. When None, the mapping is built from the node's Ironic
            port objects.
        :raises: FailedToUpdateDHCPOptOnPort if no VIFs are found, or if
            every update attempt fails.
        """
        if vifs is None:
            vifs = network.get_node_vif_ids(task)
        if not vifs:
            msg = _("No VIFs found for node %(node)s when attempting "
                    "to update DHCP BOOT options.") % {'node': task.node.uuid}
            raise exception.FailedToUpdateDHCPOptOnPort(msg)

        # Collect the Ironic port UUIDs whose Neutron update failed.
        errors = []
        for port_uuid, neutron_vif in vifs.items():
            try:
                self.update_port_dhcp_opts(neutron_vif, options,
                                           token=task.context.auth_token)
            except exception.FailedToUpdateDHCPOptOnPort:
                errors.append(port_uuid)

        if errors and len(errors) == len(vifs):
            # Every port failed: propagate as an error.
            raise exception.FailedToUpdateDHCPOptOnPort(
                _("Failed to set DHCP BOOT options for any port on node %s.")
                % task.node.uuid)
        elif errors:
            # Partial failure: warn but keep going.
            LOG.warning(_LW("Some errors were encountered when updating "
                            "the DHCP BOOT options for node %(node)s on "
                            "the following ports: %(ports)s."),
                        {'node': task.node.uuid, 'ports': errors})

        # TODO(adam_g): Hack to workaround bug 1334447 until we have a
        # mechanism for synchronizing events with Neutron.  We need to sleep
        # only if we are booting VMs, which is implied by SSHPower, to ensure
        # they do not boot before Neutron agents have setup sufficient DHCP
        # config for netboot.
        if isinstance(task.driver.power, ssh.SSHPower):
            LOG.debug("Waiting 15 seconds for Neutron.")
            time.sleep(15)
예제 #7
0
 def test_get_node_vif_ids_one_port(self):
     """The single port's VIF from 'extra' is returned keyed by port UUID."""
     vif_extra = {'vif_port_id': 'test-vif-A'}
     port1 = db_utils.create_test_port(node_id=self.node.id,
                                       driver='fake',
                                       address='aa:bb:cc',
                                       uuid=utils.generate_uuid(),
                                       extra=vif_extra)
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual({port1.uuid: 'test-vif-A'}, result)
예제 #8
0
 def test_get_node_vif_ids_one_port_int_info(self):
     """A tenant VIF recorded in internal_info is reported for the port."""
     port = db_utils.create_test_port(
         node_id=self.node.id, address='aa:bb:cc:dd:ee:ff',
         uuid=uuidutils.generate_uuid(),
         internal_info={'tenant_vif_port_id': 'test-vif-A'})
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     expected = {'portgroups': {}, 'ports': {port.uuid: 'test-vif-A'}}
     self.assertEqual(expected, result)
예제 #9
0
 def test_get_node_vif_ids_one_port(self):
     """get_node_vif_ids() maps the lone port's UUID to its VIF id."""
     port = db_utils.create_test_port(
         node_id=self.node.id,
         uuid=uuidutils.generate_uuid(),
         address='aa:bb:cc:dd:ee:ff',
         driver='fake',
         extra={'vif_port_id': 'test-vif-A'})
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual({port.uuid: 'test-vif-A'}, result)
예제 #10
0
    def _test_get_node_vif_ids_one_portgroup(self, key):
        """One portgroup's VIF is found whether stored under *key*."""
        vif_field = 'vif_port_id' if key == "extra" else 'tenant_vif_port_id'
        pg = db_utils.create_test_portgroup(
            node_id=self.node.id, **{key: {vif_field: 'test-vif-A'}})
        with task_manager.acquire(self.context, self.node.uuid) as task:
            result = network.get_node_vif_ids(task)
        self.assertEqual(
            {'portgroups': {pg.uuid: 'test-vif-A'}, 'ports': {}}, result)
예제 #11
0
 def _test_get_node_vif_ids_multitenancy(self, int_info_key):
     """Port and portgroup VIFs under *int_info_key* are both reported."""
     port = db_utils.create_test_port(
         node_id=self.node.id,
         address="aa:bb:cc:dd:ee:ff",
         internal_info={int_info_key: "test-vif-A"})
     pg = db_utils.create_test_portgroup(
         node_id=self.node.id,
         address="dd:ee:ff:aa:bb:cc",
         internal_info={int_info_key: "test-vif-B"})
     expected = {"ports": {port.uuid: "test-vif-A"},
                 "portgroups": {pg.uuid: "test-vif-B"}}
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual(expected, result)
예제 #12
0
 def test_get_node_vif_ids_one_port(self):
     """The result maps the single port's UUID to its VIF; no portgroups."""
     kwargs = {"extra": {"vif_port_id": "test-vif-A"}}
     port = db_utils.create_test_port(node_id=self.node.id,
                                      address="aa:bb:cc:dd:ee:ff",
                                      uuid=uuidutils.generate_uuid(),
                                      driver="fake", **kwargs)
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual(
         {"portgroups": {}, "ports": {port.uuid: "test-vif-A"}}, result)
예제 #13
0
 def test_get_node_vif_ids_during_cleaning(self):
     """Cleaning VIFs in internal_info are returned for port and portgroup."""
     port = db_utils.create_test_port(
         node_id=self.node.id,
         address='aa:bb:cc:dd:ee:ff',
         internal_info={'cleaning_vif_port_id': 'test-vif-A'})
     pg = db_utils.create_test_portgroup(
         node_id=self.node.id,
         address='dd:ee:ff:aa:bb:cc',
         internal_info={'cleaning_vif_port_id': 'test-vif-B'})
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual({'portgroups': {pg.uuid: 'test-vif-B'},
                       'ports': {port.uuid: 'test-vif-A'}}, result)
예제 #14
0
 def _test_get_node_vif_ids_multitenancy(self, int_info_key):
     """VIFs keyed by *int_info_key* are reported for port and portgroup."""
     info_a = {int_info_key: 'test-vif-A'}
     info_b = {int_info_key: 'test-vif-B'}
     port = db_utils.create_test_port(node_id=self.node.id,
                                      address='aa:bb:cc:dd:ee:ff',
                                      internal_info=info_a)
     portgroup = db_utils.create_test_portgroup(node_id=self.node.id,
                                                address='dd:ee:ff:aa:bb:cc',
                                                internal_info=info_b)
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual({'ports': {port.uuid: 'test-vif-A'},
                       'portgroups': {portgroup.uuid: 'test-vif-B'}},
                      result)
예제 #15
0
 def test_remove_vifs_from_node(self):
     """remove_vifs_from_node() strips tenant VIFs from ports/portgroups."""
     tenant_key = driver_common.TENANT_VIF_KEY
     db_utils.create_test_port(
         node_id=self.node.id, address='aa:bb:cc:dd:ee:ff',
         internal_info={tenant_key: 'test-vif-A'})
     db_utils.create_test_portgroup(
         node_id=self.node.id, address='dd:ee:ff:aa:bb:cc',
         internal_info={tenant_key: 'test-vif-B'})
     with task_manager.acquire(self.context, self.node.uuid) as task:
         network.remove_vifs_from_node(task)
     # Re-acquire to observe the persisted state after removal.
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual({}, result['ports'])
     self.assertEqual({}, result['portgroups'])
예제 #16
0
    def _test_get_node_vif_ids_one_portgroup(self, key):
        """A portgroup's VIF is found under either 'extra' or internal_info."""
        if key == "extra":
            vif_info = {'vif_port_id': 'test-vif-A'}
        else:
            vif_info = {'tenant_vif_port_id': 'test-vif-A'}
        pg = db_utils.create_test_portgroup(node_id=self.node.id,
                                            **{key: vif_info})
        expected = {'portgroups': {pg.uuid: 'test-vif-A'}, 'ports': {}}
        with task_manager.acquire(self.context, self.node.uuid) as task:
            result = network.get_node_vif_ids(task)
        self.assertEqual(expected, result)
예제 #17
0
 def test_get_node_vif_ids_two_portgroups(self):
     """Both portgroups' VIFs are returned keyed by portgroup UUID."""
     pg1 = db_utils.create_test_portgroup(
         node_id=self.node.id,
         extra={"vif_port_id": "test-vif-A"})
     pg2 = db_utils.create_test_portgroup(
         node_id=self.node.id,
         uuid=uuidutils.generate_uuid(),
         name="barname",
         address="dd:ee:ff:aa:bb:cc",
         extra={"vif_port_id": "test-vif-B"})
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual({"portgroups": {pg1.uuid: "test-vif-A",
                                      pg2.uuid: "test-vif-B"},
                       "ports": {}}, result)
예제 #18
0
 def _test_get_node_vif_ids_one_port(self, key):
     """One port's VIF is reported whether stored under *key*."""
     vif_field = 'vif_port_id' if key == "extra" else 'tenant_vif_port_id'
     port = db_utils.create_test_port(
         node_id=self.node.id,
         address='aa:bb:cc:dd:ee:ff',
         uuid=uuidutils.generate_uuid(),
         **{key: {vif_field: 'test-vif-A'}})
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual({'portgroups': {},
                       'ports': {port.uuid: 'test-vif-A'}}, result)
예제 #19
0
 def test_remove_vifs_from_node_failure(self, mock_unbind):
     """VIFs are removed even when the first unbind raises NetworkError."""
     db_utils.create_test_port(
         node_id=self.node.id,
         address='aa:bb:cc:dd:ee:ff',
         internal_info={driver_common.TENANT_VIF_KEY: 'test-vif-A'})
     db_utils.create_test_portgroup(
         node_id=self.node.id,
         address='dd:ee:ff:aa:bb:cc',
         internal_info={driver_common.TENANT_VIF_KEY: 'test-vif-B'})
     # First unbind fails, second succeeds; removal should proceed anyway.
     mock_unbind.side_effect = [exception.NetworkError, None]
     with task_manager.acquire(self.context, self.node.uuid) as task:
         network.remove_vifs_from_node(task)
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual({}, result['ports'])
     self.assertEqual({}, result['portgroups'])
     self.assertEqual(2, mock_unbind.call_count)
예제 #20
0
 def test_get_node_vif_ids_two_portgroups(self):
     """VIFs of two portgroups are both present in the result."""
     pg1 = db_utils.create_test_portgroup(node_id=self.node.id,
                                          extra={'vif_port_id': 'test-vif-A'})
     pg2 = db_utils.create_test_portgroup(uuid=uuidutils.generate_uuid(),
                                          node_id=self.node.id,
                                          address='dd:ee:ff:aa:bb:cc',
                                          name='barname',
                                          extra={'vif_port_id': 'test-vif-B'})
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     expected = {'portgroups': {pg1.uuid: 'test-vif-A',
                                pg2.uuid: 'test-vif-B'},
                 'ports': {}}
     self.assertEqual(expected, result)
예제 #21
0
 def test_get_node_vif_ids_two_portgroups(self):
     """Two portgroups with 'extra' VIFs both appear under 'portgroups'."""
     vif_a = 'test-vif-A'
     vif_b = 'test-vif-B'
     pg1 = db_utils.create_test_portgroup(
         node_id=self.node.id, extra={'vif_port_id': vif_a})
     pg2 = db_utils.create_test_portgroup(
         uuid=uuidutils.generate_uuid(), address='dd:ee:ff:aa:bb:cc',
         node_id=self.node.id, name='barname',
         extra={'vif_port_id': vif_b})
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual({'portgroups': {pg1.uuid: vif_a, pg2.uuid: vif_b},
                       'ports': {}}, result)
예제 #22
0
 def test_get_node_vif_ids_two_ports(self):
     """Both ports' VIFs from 'extra' appear in the result, keyed by UUID."""
     make_port = db_utils.create_test_port
     port1 = make_port(node_id=self.node.id,
                       address='aa:bb:cc:dd:ee:ff',
                       uuid=uuidutils.generate_uuid(),
                       extra={'vif_port_id': 'test-vif-A'},
                       driver='fake')
     port2 = make_port(node_id=self.node.id,
                       address='dd:ee:ff:aa:bb:cc',
                       uuid=uuidutils.generate_uuid(),
                       extra={'vif_port_id': 'test-vif-B'},
                       driver='fake')
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual({'portgroups': {},
                       'ports': {port1.uuid: 'test-vif-A',
                                 port2.uuid: 'test-vif-B'}}, result)
예제 #23
0
 def _test_get_node_vif_ids_two_portgroups(self, key):
     """Two portgroups' VIFs stored under *key* are both reported."""
     vif_field = 'vif_port_id' if key == "extra" else 'tenant_vif_port_id'
     pg1 = db_utils.create_test_portgroup(
         node_id=self.node.id, **{key: {vif_field: 'test-vif-A'}})
     pg2 = db_utils.create_test_portgroup(
         uuid=uuidutils.generate_uuid(),
         address='dd:ee:ff:aa:bb:cc',
         node_id=self.node.id,
         name='barname',
         **{key: {vif_field: 'test-vif-B'}})
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual({'portgroups': {pg1.uuid: 'test-vif-A',
                                      pg2.uuid: 'test-vif-B'},
                       'ports': {}}, result)
예제 #24
0
    def _get_port_ip_address(self, task, port_id):
        """Get ip address of ironic port assigned by neutron.

        :param task: a TaskManager instance.
        :param port_id: ironic Node's port UUID.
        :returns: Neutron port ip address associated with Node's port.
        :raises: FailedToGetIPAddressOnPort if the node has no VIFs, or if
            no VIF is recorded for *port_id*.
        :raises: InvalidIPv4Address
        """
        vifs = network.get_node_vif_ids(task)
        if not vifs:
            # Fix: the continuation string previously began with a space,
            # so the message rendered with a double space ("attempting  to").
            LOG.warning(_LW("No VIFs found for node %(node)s when attempting "
                            "to get port IP address."),
                        {'node': task.node.uuid})
            raise exception.FailedToGetIPAddressOnPort(port_id=port_id)

        try:
            port_vif = vifs[port_id]
        except KeyError:
            # Fix: an unknown port_id used to escape as a raw KeyError;
            # raise the documented exception type instead.
            raise exception.FailedToGetIPAddressOnPort(port_id=port_id)

        port_ip_address = self._get_fixed_ip_address(port_vif)
        return port_ip_address
예제 #25
0
파일: neutron.py 프로젝트: yuanying/ironic
    def _get_port_ip_address(self, task, port_id):
        """Get ip address of ironic port assigned by neutron.

        :param task: a TaskManager instance.
        :param port_id: ironic Node's port UUID.
        :returns: Neutron port ip address associated with Node's port.
        :raises: FailedToGetIPAddressOnPort when the node has no VIFs or
            *port_id* has no associated VIF.
        :raises: InvalidIPv4Address
        """
        vifs = network.get_node_vif_ids(task)
        if not vifs:
            # Fix: drop the stray leading space on the continuation line
            # that produced "attempting  to" in the logged warning.
            LOG.warning(_LW("No VIFs found for node %(node)s when attempting "
                            "to get port IP address."),
                        {'node': task.node.uuid})
            raise exception.FailedToGetIPAddressOnPort(port_id=port_id)

        try:
            port_vif = vifs[port_id]
        except KeyError:
            # Fix: map a missing port_id to the documented exception rather
            # than leaking a KeyError to the caller.
            raise exception.FailedToGetIPAddressOnPort(port_id=port_id)

        port_ip_address = self._get_fixed_ip_address(port_vif)
        return port_ip_address
예제 #26
0
    def update_dhcp_opts(self, task, options, vifs=None):
        """Send or update the DHCP BOOT options for this node.

        :param task: A TaskManager instance.
        :param options: a list of DHCP option dicts, e.g.

                        ::

                         [{'opt_name': 'bootfile-name',
                           'opt_value': 'pxelinux.0'},
                          {'opt_name': 'server-ip-address',
                           'opt_value': '123.123.123.456'},
                          {'opt_name': 'tftp-server',
                           'opt_value': '123.123.123.123'}]
        :param vifs: a dict with 'ports' and 'portgroups' keys, each mapping
                     Ironic port/portgroup UUIDs to Neutron port UUIDs, e.g.

                     ::

                      {'ports': {'port.uuid': vif.id},
                       'portgroups': {'portgroup.uuid': vif.id}}

                     When None, the mapping is built from the node's Ironic
                     port/portgroup objects.
        :raises: FailedToUpdateDHCPOptOnPort when no VIFs exist or every
                 update attempt fails.
        """
        if vifs is None:
            vifs = network.get_node_vif_ids(task)
        if not (vifs['ports'] or vifs['portgroups']):
            raise exception.FailedToUpdateDHCPOptOnPort(
                _("No VIFs found for node %(node)s when attempting "
                  "to update DHCP BOOT options.") % {'node': task.node.uuid})

        # Flatten the ports/portgroups mapping into one list of Neutron IDs.
        neutron_ids = []
        for group in vifs.values():
            neutron_ids.extend(group.values())

        failed = []
        for vif in neutron_ids:
            try:
                self.update_port_dhcp_opts(
                    vif, options, token=task.context.auth_token)
            except exception.FailedToUpdateDHCPOptOnPort:
                failed.append(vif)

        if failed:
            if len(failed) == len(neutron_ids):
                raise exception.FailedToUpdateDHCPOptOnPort(
                    _("Failed to set DHCP BOOT options for any port on "
                      "node %s.") % task.node.uuid)
            LOG.warning(_LW("Some errors were encountered when updating "
                            "the DHCP BOOT options for node %(node)s on "
                            "the following Neutron ports: %(ports)s."),
                        {'node': task.node.uuid, 'ports': failed})

        # TODO(adam_g): Hack to workaround bug 1334447 until we have a
        # mechanism for synchronizing events with Neutron. We need to sleep
        # only if server gets to PXE faster than Neutron agents have setup
        # sufficient DHCP config for netboot. It may occur when we are using
        # VMs or hardware server with fast boot enabled.
        port_delay = CONF.neutron.port_setup_delay
        # TODO(vsaienko) remove hardcoded value for SSHPower driver
        # after Newton release.
        if port_delay == 0 and isinstance(task.driver.power, ssh.SSHPower):
            LOG.warning(
                _LW("Setting the port delay to 15 for SSH power "
                    "driver by default, this will be removed in "
                    "Ocata release. Please set configuration "
                    "parameter port_setup_delay to 15."))
            port_delay = 15
        if port_delay != 0:
            LOG.debug("Waiting %d seconds for Neutron.", port_delay)
            time.sleep(port_delay)
예제 #27
0
 def test_get_node_vif_ids_no_ports_no_portgroups(self):
     """With no ports or portgroups, both result mappings are empty."""
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual({'portgroups': {}, 'ports': {}}, result)
예제 #28
0
파일: neutron.py 프로젝트: ajya/ironic-fork
    def update_dhcp_opts(self, task, options, vifs=None):
        """Send or update the DHCP BOOT options for this node.

        :param task: A TaskManager instance.
        :param options: a list of DHCP option dicts, e.g.

                        ::

                         [{'opt_name': '67',
                           'opt_value': 'pxelinux.0',
                           'ip_version': 4},
                          {'opt_name': '66',
                           'opt_value': '123.123.123.456',
                           'ip_version': 4}]
        :param vifs: a dict with 'ports' and 'portgroups' keys, each mapping
                     Ironic port/portgroup UUIDs to Neutron port UUIDs, e.g.

                     ::

                      {'ports': {'port.uuid': vif.id},
                       'portgroups': {'portgroup.uuid': vif.id}}

                     When None, the mapping is built from the node's Ironic
                     port/portgroup objects.
        :raises: FailedToUpdateDHCPOptOnPort when no VIFs exist or every
                 update attempt fails.
        """
        if vifs is None:
            vifs = network.get_node_vif_ids(task)
        if not (vifs['ports'] or vifs['portgroups']):
            raise exception.FailedToUpdateDHCPOptOnPort(
                _("No VIFs found for node %(node)s when attempting "
                  "to update DHCP BOOT options.") % {'node': task.node.uuid})

        # Collect every Neutron port id across both mappings.
        vif_list = []
        for mapping in vifs.values():
            vif_list.extend(mapping.values())

        failures = []
        for vif in vif_list:
            try:
                self.update_port_dhcp_opts(vif, options, context=task.context)
            except exception.FailedToUpdateDHCPOptOnPort:
                failures.append(vif)

        if failures:
            if len(failures) == len(vif_list):
                raise exception.FailedToUpdateDHCPOptOnPort(
                    _("Failed to set DHCP BOOT options for any port on "
                      "node %s.") % task.node.uuid)
            LOG.warning("Some errors were encountered when updating "
                        "the DHCP BOOT options for node %(node)s on "
                        "the following Neutron ports: %(ports)s.",
                        {'node': task.node.uuid, 'ports': failures})

        # TODO(adam_g): Hack to workaround bug 1334447 until we have a
        # mechanism for synchronizing events with Neutron. We need to sleep
        # only if server gets to PXE faster than Neutron agents have setup
        # sufficient DHCP config for netboot. It may occur when we are using
        # VMs or hardware server with fast boot enabled.
        port_delay = CONF.neutron.port_setup_delay
        if port_delay != 0:
            LOG.debug("Waiting %d seconds for Neutron.", port_delay)
            time.sleep(port_delay)
예제 #29
0
 def test_get_node_vif_ids_no_ports(self):
     """An empty mapping is returned when the node has no ports."""
     with task_manager.acquire(self.context, self.node.uuid) as task:
         result = network.get_node_vif_ids(task)
     self.assertEqual({}, result)
예제 #30
0
파일: neutron.py 프로젝트: Tehsmash/ironic
    def update_dhcp_opts(self, task, options, vifs=None):
        """Send or update the DHCP BOOT options for this node.

        :param task: A TaskManager instance.
        :param options: a list of DHCP option dicts, e.g.

                        ::

                         [{'opt_name': 'bootfile-name',
                           'opt_value': 'pxelinux.0'},
                          {'opt_name': 'server-ip-address',
                           'opt_value': '123.123.123.456'},
                          {'opt_name': 'tftp-server',
                           'opt_value': '123.123.123.123'}]
        :param vifs: a dict with 'ports' and 'portgroups' keys, each mapping
                     Ironic port/portgroup UUIDs to Neutron port UUIDs, e.g.

                     ::

                      {'ports': {'port.uuid': vif.id},
                       'portgroups': {'portgroup.uuid': vif.id}}

                     When None, the mapping is built from the node's Ironic
                     port/portgroup objects.
        :raises: FailedToUpdateDHCPOptOnPort when no VIFs exist or every
                 update attempt fails.
        """
        if vifs is None:
            vifs = network.get_node_vif_ids(task)
        if not (vifs['ports'] or vifs['portgroups']):
            raise exception.FailedToUpdateDHCPOptOnPort(
                _("No VIFs found for node %(node)s when attempting "
                  "to update DHCP BOOT options.") %
                {'node': task.node.uuid})

        # Flatten both mappings into a single list of Neutron port ids.
        all_vifs = []
        for uuid_to_vif in vifs.values():
            all_vifs.extend(uuid_to_vif.values())

        failed = []
        for vif in all_vifs:
            try:
                self.update_port_dhcp_opts(vif, options)
            except exception.FailedToUpdateDHCPOptOnPort:
                failed.append(vif)

        if failed and len(failed) == len(all_vifs):
            # Every single update failed: escalate to an error.
            raise exception.FailedToUpdateDHCPOptOnPort(_(
                "Failed to set DHCP BOOT options for any port on node %s.")
                % task.node.uuid)
        elif failed:
            LOG.warning("Some errors were encountered when updating "
                        "the DHCP BOOT options for node %(node)s on "
                        "the following Neutron ports: %(ports)s.",
                        {'node': task.node.uuid, 'ports': failed})

        # TODO(adam_g): Hack to workaround bug 1334447 until we have a
        # mechanism for synchronizing events with Neutron. We need to sleep
        # only if server gets to PXE faster than Neutron agents have setup
        # sufficient DHCP config for netboot. It may occur when we are using
        # VMs or hardware server with fast boot enabled.
        port_delay = CONF.neutron.port_setup_delay
        if port_delay != 0:
            LOG.debug("Waiting %d seconds for Neutron.", port_delay)
            time.sleep(port_delay)