Example #1
 def update_device_up(self, rpc_context, **kwargs):
     """Device is up on agent."""
     agent_id = kwargs.get('agent_id')
     device = kwargs.get('device')
     host = kwargs.get('host')
     LOG.debug("Device %(device)s up at agent %(agent_id)s",
               {'device': device, 'agent_id': agent_id})
     plugin = manager.NeutronManager.get_plugin()
     port_id = plugin._device_to_port_id(rpc_context, device)
     port = plugin.port_bound_to_host(rpc_context, port_id, host)
     if host and not port:
         LOG.debug("Device %(device)s not bound to the"
                   " agent host %(host)s",
                   {'device': device, 'host': host})
         return
     if port and port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE:
         # NOTE(kevinbenton): we have to special case DVR ports because of
         # the special multi-binding status update logic they have that
         # depends on the host
         plugin.update_port_status(rpc_context, port_id,
                                   n_const.PORT_STATUS_ACTIVE, host)
     else:
         # _device_to_port_id may have returned a truncated UUID if the
         # agent did not provide a full one (e.g. Linux Bridge case). We
         # need to look up the full one before calling provisioning_complete
         if not port:
             port = ml2_db.get_port(rpc_context.session, port_id)
         if not port:
             # port doesn't exist, no need to add a provisioning block
             return
         provisioning_blocks.provisioning_complete(
             rpc_context, port['id'], resources.PORT,
             provisioning_blocks.L2_AGENT_ENTITY)
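The provisioning_complete() call above only has something to clear because a matching block was registered earlier, when the port was created or bound. A minimal sketch of that counterpart call, assuming the same neutron.db.provisioning_blocks API exercised throughout these examples (the helper function and import paths are illustrative, not taken from the source):

 from neutron.callbacks import resources
 from neutron.db import provisioning_blocks


 def _block_until_l2_agent_reports(context, port):
     # Illustrative helper: while this component is outstanding the port
     # stays DOWN; update_device_up() above releases it via the matching
     # provisioning_complete() call for L2_AGENT_ENTITY.
     provisioning_blocks.add_provisioning_component(
         context, port['id'], resources.PORT,
         provisioning_blocks.L2_AGENT_ENTITY)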
Example #2
 def set_port_status_up(self, port_id):
     # Port provisioning is complete now that OVN has reported
     # that the port is up.
     LOG.info(_LI("OVN reports status up for port: %s"), port_id)
     provisioning_blocks.provisioning_complete(
         n_context.get_admin_context(), port_id, resources.PORT,
         provisioning_blocks.L2_AGENT_ENTITY)
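provisioning_complete() only has a visible effect because some component subscribed to the PROVISIONING_COMPLETE event beforehand. A minimal sketch of such a subscription, assuming the standard neutron callbacks registry (the callback body is illustrative; in ML2 the equivalent handler transitions the port to ACTIVE at this point):

 from neutron.callbacks import registry
 from neutron.callbacks import resources
 from neutron.db import provisioning_blocks


 def _port_provisioned(resource, event, trigger, context=None,
                       object_id=None, **kwargs):
     # Invoked only once every registered entity (L2 agent, DHCP, ...)
     # has reported provisioning_complete() for this port.
     LOG.debug("Port %s fully provisioned", object_id)


 registry.subscribe(_port_provisioned, resources.PORT,
                    provisioning_blocks.PROVISIONING_COMPLETE)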
Example #3
 def _pull_missed_statuses(self):
     LOG.debug("starting to pull pending statuses...")
     plugin = directory.get_plugin()
     filters = {"status": [n_const.PORT_STATUS_DOWN],
                "vif_type": ["unbound"]}
     ports = plugin.get_ports(context.get_admin_context(), filters)

     if not ports:
         LOG.debug("no down ports found, done")
         return

     port_fetch_url = utils.get_odl_url(self.PORT_PATH)
     client = odl_client.OpenDaylightRestClient.create_client(
         url=port_fetch_url)

     for port in ports:
         port_id = port["id"]
         response = client.get(port_id)
         if response.status_code != 200:
             LOG.warning("Non-200 response code %s", response.status_code)
             continue
         odl_status = response.json()['port'][0]['status']
         if odl_status == n_const.PORT_STATUS_ACTIVE:
             # for now we only support transition from DOWN->ACTIVE
             # See https://bugs.launchpad.net/networking-odl/+bug/1686023
             provisioning_blocks.provisioning_complete(
                 context.get_admin_context(),
                 port_id, resources.PORT,
                 provisioning_blocks.L2_AGENT_ENTITY)
     LOG.debug("done pulling pending statuses")
Example #4
 def test_adding_component_idempotent(self):
     for i in range(5):
         pb.add_provisioning_component(self.ctx, self.port.id,
                                       resources.PORT, 'entity1')
     pb.provisioning_complete(self.ctx, self.port.id,
                              resources.PORT, 'entity1')
     self.assertTrue(self.provisioned.called)
Example #5
 def test_provisioning_of_correct_item(self):
     port2 = self._make_port()
     pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                   'entity1')
     pb.provisioning_complete(self.ctx, port2.id,
                              resources.PORT, 'entity1')
     self.provisioned.assert_called_once_with(
         resources.PORT, pb.PROVISIONING_COMPLETE, mock.ANY,
         context=self.ctx, object_id=port2.id)
Example #6
 def set_port_status_up(self, port_id):
     # Port provisioning is complete now that OVN has reported that the
     # port is up. Any provisioning block (possibly added during port
     # creation or when OVN reports that the port is down) must be removed.
     LOG.info(_LI("OVN reports status up for port: %s"), port_id)
     provisioning_blocks.provisioning_complete(
         n_context.get_admin_context(),
         port_id,
         resources.PORT,
         provisioning_blocks.L2_AGENT_ENTITY)
Example #7
 def test_is_object_blocked(self):
     pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                   'e1')
     self.assertTrue(pb.is_object_blocked(self.ctx, self.port.id,
                                          resources.PORT))
     self.assertFalse(pb.is_object_blocked(self.ctx, 'xyz',
                                           resources.PORT))
     pb.provisioning_complete(self.ctx, self.port.id,
                              resources.PORT, 'e1')
     self.assertFalse(pb.is_object_blocked(self.ctx, self.port.id,
                                           resources.PORT))
Example #8
 def test_adding_component_for_new_resource_type(self):
     provisioned = mock.Mock()
     registry.subscribe(provisioned, 'NETWORK', pb.PROVISIONING_COMPLETE)
     net = self._make_net()
     # expect failure because the model was not registered for the type
     with testtools.ExpectedException(RuntimeError):
         pb.add_provisioning_component(self.ctx, net.id, 'NETWORK', 'ent')
     pb.add_model_for_resource('NETWORK', models_v2.Network)
     pb.add_provisioning_component(self.ctx, net.id, 'NETWORK', 'ent')
     pb.provisioning_complete(self.ctx, net.id, 'NETWORK', 'ent')
     self.assertTrue(provisioned.called)
Example #9
 def test_not_provisioned_until_final_component_complete(self):
     pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                   'entity1')
     pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                   'entity2')
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT,
                              'entity1')
     self.assertFalse(self.provisioned.called)
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT,
                              'entity2')
     self.assertTrue(self.provisioned.called)
Example #10
 def test_remove_provisioning_component(self):
     pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                   'e1')
     pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                   'e2')
     self.assertTrue(pb.remove_provisioning_component(
           self.ctx, self.port.id, resources.PORT, 'e1'))
     self.assertFalse(self.provisioned.called)
     pb.provisioning_complete(self.ctx, self.port.id,
                              resources.PORT, 'other')
     self.assertFalse(self.provisioned.called)
     pb.provisioning_complete(self.ctx, self.port.id,
                              resources.PORT, 'e2')
     self.assertTrue(self.provisioned.called)
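Taken together, these tests pin down the whole lifecycle of a provisioning block. A compact sketch using the same pb alias, with ctx and port_id standing in for the test fixtures:

 pb.add_provisioning_component(ctx, port_id, resources.PORT, 'dhcp')
 pb.add_provisioning_component(ctx, port_id, resources.PORT, 'l2')
 pb.is_object_blocked(ctx, port_id, resources.PORT)              # True

 pb.provisioning_complete(ctx, port_id, resources.PORT, 'dhcp')  # still blocked
 pb.provisioning_complete(ctx, port_id, resources.PORT, 'l2')
 # all components have reported: PROVISIONING_COMPLETE callbacks fire here
 pb.is_object_blocked(ctx, port_id, resources.PORT)              # False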
Example #11
File: rpc.py Project: annp/neutron
 def update_device_up(self, rpc_context, **kwargs):
     """Device is up on agent."""
     agent_id = kwargs.get('agent_id')
     device = kwargs.get('device')
     host = kwargs.get('host')
     LOG.debug("Device %(device)s up at agent %(agent_id)s",
               {'device': device, 'agent_id': agent_id})
     plugin = manager.NeutronManager.get_plugin()
     port_id = plugin._device_to_port_id(rpc_context, device)
     port = plugin.port_bound_to_host(rpc_context, port_id, host)
     if host and not port:
         LOG.debug("Device %(device)s not bound to the"
                   " agent host %(host)s",
                   {'device': device, 'host': host})
         # this might mean that a VM is in the process of live migration
         # and vif was plugged on the destination compute node;
         # need to notify nova explicitly
         try:
             port = plugin._get_port(rpc_context, port_id)
         except exceptions.PortNotFound:
             LOG.debug("Port %s not found, will not notify nova.", port_id)
         else:
             if port.device_owner.startswith(
                     n_const.DEVICE_OWNER_COMPUTE_PREFIX):
                 plugin.nova_notifier.notify_port_active_direct(port)
         return
     if port and port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE:
         # NOTE(kevinbenton): we have to special case DVR ports because of
         # the special multi-binding status update logic they have that
         # depends on the host
         plugin.update_port_status(rpc_context, port_id,
                                   n_const.PORT_STATUS_ACTIVE, host)
     else:
         # _device_to_port_id may have returned a truncated UUID if the
         # agent did not provide a full one (e.g. Linux Bridge case). We
         # need to look up the full one before calling provisioning_complete
         if not port:
             port = ml2_db.get_port(rpc_context.session, port_id)
         if not port:
             # port doesn't exist, no need to add a provisioning block
             return
         provisioning_blocks.provisioning_complete(
             rpc_context, port['id'], resources.PORT,
             provisioning_blocks.L2_AGENT_ENTITY)
Example #12
 def update_port_status_to_active(self, port, rpc_context, port_id, host):
     plugin = directory.get_plugin()
     if port and port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE:
         # NOTE(kevinbenton): we have to special case DVR ports because of
         # the special multi-binding status update logic they have that
         # depends on the host
         plugin.update_port_status(rpc_context, port_id,
                                   n_const.PORT_STATUS_ACTIVE, host)
     else:
         # _device_to_port_id may have returned a truncated UUID if the
         # agent did not provide a full one (e.g. Linux Bridge case). We
         # need to look up the full one before calling provisioning_complete
         if not port:
             port = ml2_db.get_port(rpc_context, port_id)
         if not port:
             # port doesn't exist, no need to add a provisioning block
             return
         provisioning_blocks.provisioning_complete(
             rpc_context, port['id'], resources.PORT,
             provisioning_blocks.L2_AGENT_ENTITY)
Example #13
 def _process_websocket_recv(self, payload, reconnect):
     # Callback for websocket notification
     LOG.debug("Websocket notification for port status update")
     for event in odl_ws_client.EventDataParser.get_item(payload):
         operation, path, data = event.get_fields()
         if operation in (event.OPERATION_UPDATE,
                          event.OPERATION_CREATE):
             port_id = event.extract_field(path, "neutron:uuid")
             port_id = str(port_id).strip("'")
             status_field = data.get('status')
             if status_field is not None:
                 status = status_field.get('content')
                 LOG.debug("Update port for port id %s %s", port_id, status)
                 # for now we only support transition from DOWN->ACTIVE
                 # https://bugs.launchpad.net/networking-odl/+bug/1686023
                 if status == n_const.PORT_STATUS_ACTIVE:
                     provisioning_blocks.provisioning_complete(
                         context.get_admin_context(),
                         port_id, resources.PORT,
                         provisioning_blocks.L2_AGENT_ENTITY)
         elif operation == event.OPERATION_DELETE:
             LOG.debug("PortStatus: Ignoring delete operation")
Example #14
 def test_not_provisioned_when_wrong_component_reports(self):
     pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                   'entity1')
     pb.provisioning_complete(self.ctx, self.port.id,
                              resources.PORT, 'entity2')
     self.assertFalse(self.provisioned.called)
Example #15
 def test_provisioned_after_component_finishes(self):
     pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                   'entity')
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT,
                              'entity')
     self.assertTrue(self.provisioned.called)
Example #16
 def test_provisioned_with_no_components(self):
     pb.provisioning_complete(self.ctx, self.port.id, resources.PORT,
                              'entity')
     self.assertTrue(self.provisioned.called)
Example #17
 def test_no_callback_on_missing_object(self):
     pb.provisioning_complete(self.ctx, 'someid', resources.PORT, 'entity')
     self.assertFalse(self.provisioned.called)
Example #18
 def dhcp_ready_on_ports(self, context, port_ids):
     for port_id in port_ids:
         provisioning_blocks.provisioning_complete(
             context, port_id, resources.PORT,
             provisioning_blocks.DHCP_ENTITY)
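As with the L2 agent entity, this call only clears blocks that were registered earlier under DHCP_ENTITY. A one-line sketch of that counterpart (in Neutron the core plugin adds this when a port lands on a DHCP-enabled subnet; the surrounding context is not shown here and is illustrative):

 # Illustrative counterpart: registered at port creation, cleared by the
 # dhcp_ready_on_ports() RPC handler above.
 provisioning_blocks.add_provisioning_component(
     context, port['id'], resources.PORT,
     provisioning_blocks.DHCP_ENTITY)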