Example no. 1
    def _rest_get_hostconfigs(self):
        try:
            response = self.odl_rest_client.get()
            response.raise_for_status()
            hostconfigs = response.json()['hostconfigs']['hostconfig']
        except exceptions.ConnectionError:
            LOG.error(_LE("Cannot connect to the Opendaylight Controller"),
                      exc_info=True)
            return None
        except exceptions.HTTPError as e:
            # restconf returns 404 on operation when there is no entry
            if e.response.status_code == codes.not_found:
                LOG.debug("Response code not_found (404)"
                          " treated as an empty list")
                return []
            else:
                LOG.warning(_LW("REST/GET odl hostconfig failed, "),
                            exc_info=True)
                return None
        except KeyError:
            LOG.error(_LE("got invalid hostconfigs"), exc_info=True)
            return None
        except Exception:
            LOG.warning(_LW("REST/GET odl hostconfig failed, "), exc_info=True)
            return None
        else:
            if LOG.isEnabledFor(logging.DEBUG):
                _hconfig_str = jsonutils.dumps(hostconfigs,
                                               sort_keys=True,
                                               indent=4,
                                               separators=(',', ': '))
                LOG.debug("ODLPORTBINDING hostconfigs:\n%s", _hconfig_str)

        return hostconfigs
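
A note on the pattern above: an HTTP 404 from RESTCONF is deliberately treated as an empty result rather than an error. A minimal standalone sketch of the same idea with plain requests (the url argument and the absence of authentication are assumptions for illustration, not the driver's real client):

    import requests

    def get_hostconfigs_or_empty(url, timeout=10):
        """Return parsed hostconfigs, [] on 404; raise on other errors."""
        response = requests.get(url, timeout=timeout)
        if response.status_code == requests.codes.not_found:
            # RESTCONF answers 404 when the datastore has no entry
            return []
        response.raise_for_status()
        return response.json()['hostconfigs']['hostconfig']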
Example no. 2
    def _sync_pending_rows(self, session, exit_after_run):
        while True:
            LOG.debug("Thread walking database")
            row = db.get_oldest_pending_db_row_with_lock(session)
            if not row:
                LOG.debug("No rows to sync")
                break

            # Validate the operation
            validate_func = (
                dependency_validations.VALIDATION_MAP[row.object_type])
            valid = validate_func(session, row)
            if not valid:
                LOG.info(
                    _LI("%(operation)s %(type)s %(uuid)s is not a "
                        "valid operation yet, skipping for now"), {
                            'operation': row.operation,
                            'type': row.object_type,
                            'uuid': row.object_uuid
                        })

                # Set row back to pending.
                db.update_db_row_state(session, row, odl_const.PENDING)
                if exit_after_run:
                    break
                continue

            LOG.info(
                _LI("Syncing %(operation)s %(type)s %(uuid)s"), {
                    'operation': row.operation,
                    'type': row.object_type,
                    'uuid': row.object_uuid
                })

            # Add code to sync this to ODL
            method, urlpath, to_send = self._json_data(row)

            try:
                self.client.sendjson(method, urlpath, to_send)
                db.update_db_row_state(session, row, odl_const.COMPLETED)
            except exceptions.ConnectionError:
                # Don't increment the retry count, just log an error
                LOG.error(_LE("Cannot connect to the Opendaylight Controller"))
                # Set row back to pending
                db.update_db_row_state(session, row, odl_const.PENDING)
                # Break out of the loop and retry with the next
                # timer interval
                break
            except Exception as e:
                LOG.error(
                    _LE("Error syncing %(type)s %(operation)s,"
                        " id %(uuid)s Error: %(error)s"), {
                            'type': row.object_type,
                            'uuid': row.object_uuid,
                            'operation': row.operation,
                            'error': str(e)
                        })
                db.update_pending_db_row_retry(session, row,
                                               self._row_retry_count)
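
The loop above drives a small state machine over journal rows: a row starts PENDING, goes to COMPLETED on success, back to PENDING on connection errors, and consumes a retry on other failures (Example no. 16 later shows FAILED rows being recovered). A minimal sketch of those transitions, with a simplified stand-in Row class rather than the real networking-odl model:

    PENDING, COMPLETED, FAILED = 'pending', 'completed', 'failed'

    class Row(object):
        def __init__(self, operation, object_type, object_uuid):
            self.operation = operation
            self.object_type = object_type
            self.object_uuid = object_uuid
            self.state = PENDING
            self.retry_count = 0

    def mark_retry(row, max_retries):
        # Mirrors update_pending_db_row_retry: retry until the budget
        # is exhausted, then park the row as FAILED for recovery.
        row.retry_count += 1
        row.state = FAILED if row.retry_count > max_retries else PENDING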
Example no. 4
 def sync_from_callback(self, context, operation, res_type, res_id,
                        resource_dict):
     object_type = res_type.plural.replace('_', '-')
     try:
         if operation == odl_const.ODL_DELETE:
             self.out_of_sync |= not self.client.try_delete(object_type +
                                                            '/' + res_id)
         else:
             if operation == odl_const.ODL_CREATE:
                 urlpath = object_type
                 method = 'post'
             elif operation == odl_const.ODL_UPDATE:
                 urlpath = object_type + '/' + res_id
                 method = 'put'
             self.client.sendjson(method, urlpath, resource_dict)
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.error(
                 _LE("Unable to perform %(operation)s on "
                     "%(object_type)s %(res_id)s "
                     "%(resource_dict)s"), {
                         'operation': operation,
                         'object_type': object_type,
                         'res_id': res_id,
                         'resource_dict': resource_dict
                     })
             self.out_of_sync = True
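
The create/update/delete branching above amounts to a small dispatch from journal operation to HTTP verb and URL path. A sketch of that mapping, assuming the odl_const operation constants equal the plain strings shown:

    def rest_call_for(operation, object_type, res_id):
        """Map a journal operation to an HTTP method and URL path."""
        if operation == 'create':
            return 'post', object_type
        if operation == 'update':
            return 'put', object_type + '/' + res_id
        if operation == 'delete':
            return 'delete', object_type + '/' + res_id
        raise ValueError('unknown operation: %r' % operation)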
Example no. 5
    def send_resource(self, operation, object_type, data):
        """Send over a single resource from Neutron to OpenDaylight.

        Prepare a rest call and send a single resource to ODL NB
        """
        # Convert underscores to dashes in the URL for ODL
        object_type_url = object_type.replace('_', '-')
        try:
            obj_id = data['id']
            if operation == odl_const.ODL_DELETE:
                self.client.try_delete(object_type_url + '/' + obj_id)
            else:
                if operation == odl_const.ODL_CREATE:
                    urlpath = object_type_url
                    method = 'post'
                elif operation == odl_const.ODL_UPDATE:
                    urlpath = object_type_url + '/' + obj_id
                    method = 'put'
                policy_data = self.convert_rules_format(data)
                self.client.sendjson(method, urlpath,
                                     {odl_const.ODL_POLICY: policy_data})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Unable to perform %(operation)s on "
                        "%(object_type)s %(object_id)s"), {
                            'operation': operation,
                            'object_type': object_type,
                            'object_id': obj_id
                        })
Example no. 6
def _vif_type_from_conf(conf, userspace_datapath_types):

    # take vif_type from datapath_type ------------------------------------
    if conf.datapath_type:
        # take it from datapath_type
        if conf.datapath_type in USERSPACE_DATAPATH_TYPES:
            if conf.datapath_type not in userspace_datapath_types:
                LOG.warning(
                    _LW("Using user space data path type '%s' even if no "
                        "support was detected."), conf.datapath_type)
            return 'vhostuser'
        else:
            return 'ovs'

    # take vif_type from ovs_dpdk -----------------------------------------
    if conf.ovs_dpdk is True:
        if userspace_datapath_types:
            return 'vhostuser'

        raise ValueError(
            _LE("--ovs_dpdk option was specified but the 'netdev' datapath_type "
                "was not enabled. "
                "To override use option --datapath_type=netdev"))

    elif conf.ovs_dpdk is False:
        return 'ovs'

    # take detected dtype -------------------------------------------------
    if userspace_datapath_types:
        return 'vhostuser'
    else:
        return 'ovs'
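
A usage sketch for the decision function above, with a hypothetical conf namespace standing in for the real oslo.config object, and assuming USERSPACE_DATAPATH_TYPES contains 'netdev' (as the error message suggests):

    from collections import namedtuple

    Conf = namedtuple('Conf', ['datapath_type', 'ovs_dpdk'])

    # 'netdev' configured and detected: DPDK hosts get vhostuser.
    assert _vif_type_from_conf(Conf('netdev', None), {'netdev'}) == 'vhostuser'
    # Kernel datapath, DPDK explicitly disabled: plain OVS.
    assert _vif_type_from_conf(Conf(None, False), set()) == 'ovs'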
Example no. 7
def main(args=None):
    """Main."""

    conf = setup_conf(args)

    if os.geteuid() != 0:
        LOG.error(_LE('Root permissions are required to configure ovsdb.'))
        return 1

    try:
        set_ovs_extid_hostconfigs(conf=conf, ovs_vsctl=OvsVsctl())

    except Exception as ex:  # pylint: disable=broad-except
        LOG.error(_LE("Fatal error: %s"), ex, exc_info=conf.debug)
        return 1

    else:
        return 0
Example no. 8
    def _create_parsers(self):
        for parser_name in self.network_topology_parsers:
            try:
                yield NetworkTopologyParser.create_parser(parser_name)

            except Exception:
                LOG.exception(
                    _LE('Error initializing topology parser: %(parser_name)r'),
                    {'parser_name': parser_name})
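
The generator above isolates a failing parser so the remaining ones are still created and yielded. The same pattern in a self-contained sketch (create_one is a hypothetical factory callable):

    import logging

    LOG = logging.getLogger(__name__)

    def create_all(names, create_one):
        """Yield one instance per name, logging and skipping failures."""
        for name in names:
            try:
                yield create_one(name)
            except Exception:
                LOG.exception('Error initializing parser: %r', name)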
Example no. 10
    def _fetch_elements_by_host(self, host_name, cache_timeout=60.0):
        '''Yields all network topology elements referring to given host name

        '''

        host_addresses = [host_name]
        try:
            # It uses both the compute host name and known IP addresses to
            # recognize topology elements valid for the given compute host
            ip_addresses = utils.get_addresses_by_name(host_name)
        except Exception:
            ip_addresses = []
            LOG.exception(
                _LE('Unable to resolve IP addresses for host %(host_name)r'),
                {'host_name': host_name})
        else:
            host_addresses.extend(ip_addresses)

        yield_elements = set()
        try:
            for __, element in self._elements_by_ip.fetch_all(
                    host_addresses, cache_timeout):
                # yields every element only once
                if element not in yield_elements:
                    yield_elements.add(element)
                    yield element

        except cache.CacheFetchError as error:
            # This error is expected in most cases because typically not
            # all host_addresses map to a network topology element.
            if yield_elements:
                # As we need only one element for every host, we ignore the
                # case in which other host addresses didn't map to any element
                LOG.debug(
                    'Host addresses not found in networking topology: %s',
                    ', '.join(error.missing_keys))
            else:
                LOG.exception(
                    _LE('No such network topology elements for given host '
                        '%(host_name)r and given IPs: %(ip_addresses)s.'), {
                            'host_name': host_name,
                            'ip_addresses': ", ".join(ip_addresses)
                        })
                error.reraise_cause()
Example no. 12
    def _hconfig_bind_port(self, port_context, hconfig):
        """bind port after validating odl host configuration."""
        valid_segment = None

        for segment in port_context.segments_to_bind:
            if self._is_valid_segment(segment, hconfig['configurations']):
                valid_segment = segment
                break
        else:
            LOG.debug("No valid segments found!")
            return False

        confs = hconfig['configurations']['supported_vnic_types']

        # nova provides vnic_type in port_context to neutron.
        # neutron provides supported vif_type for binding based on vnic_type
        # in this case ODL hostconfigs has the vif_type to bind for vnic_type
        vnic_type = port_context.current.get(portbindings.VNIC_TYPE)

        if vnic_type != portbindings.VNIC_NORMAL:
            LOG.error(_LE("Binding failed: unsupported VNIC %s"), vnic_type)
            return False

        for conf in confs:
            if conf["vnic_type"] == vnic_type:
                vif_type = conf.get('vif_type', portbindings.VIF_TYPE_OVS)
                LOG.debug("Binding vnic:'%s' to vif:'%s'", vnic_type, vif_type)
                break
        else:
            vif_type = portbindings.VIF_TYPE_OVS  # default: OVS
            LOG.warning(
                _LW("No supported vif type found for host %s!, "
                    "defaulting to OVS"), port_context.host)

        vif_details = conf.get('vif_details', {})

        if not vif_details:  # empty vif_details could be trouble, warn.
            LOG.warning(_LW("hostconfig:vif_details was empty!"))

        LOG.debug(
            "Bind port %(port)s on network %(network)s with valid "
            "segment %(segment)s and VIF type %(vif_type)r "
            "VIF details %(vif_details)r.", {
                'port': port_context.current['id'],
                'network': port_context.network.current['id'],
                'segment': valid_segment,
                'vif_type': vif_type,
                'vif_details': vif_details
            })

        port_context.set_binding(valid_segment[driver_api.ID],
                                 vif_type,
                                 vif_details,
                                 status=nl_const.PORT_STATUS_ACTIVE)
        return True
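
Reading the lookups backwards, the hostconfig this method expects looks roughly like the dict below. This is reconstructed from the code for illustration, not an official ODL schema:

    hconfig = {
        'configurations': {
            # _is_valid_segment() consults further keys in here
            'supported_vnic_types': [{
                'vnic_type': 'normal',  # matched against the port's VNIC type
                'vif_type': 'ovs',      # defaulted to OVS when absent
                'vif_details': {},      # an empty dict triggers the warning
            }],
        },
    }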
Example no. 13
 def delete_l2_gateway_postcommit(self, context, l2_gateway_id):
     LOG.info(_LI("ODL: Delete L2Gateway %(l2gatewayid)s"),
              {'l2gatewayid': l2_gateway_id})
     url = L2GATEWAYS + '/' + l2_gateway_id
     try:
         self.client.try_delete(url)
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.exception(_LE("ODL: L2Gateway delete"
                               " failed for gateway_id %(l2gatewayid)s"),
                           {'l2gatewayid': l2_gateway_id})
Example no. 14
 def create_l2_gateway_postcommit(self, context, l2_gateway):
     LOG.info(_LI("ODL: Create L2Gateway %(l2gateway)s"),
              {'l2gateway': l2_gateway})
     request = {'l2_gateway': l2_gateway}
     try:
         self.client.sendjson('post', L2GATEWAYS, request)
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.exception(_LE("ODL: L2Gateway create"
                               " failed for gateway %(l2gatewayid)s"),
                           {'l2gatewayid': l2_gateway['id']})
Example no. 15
 def delete_l2_gateway_connection_postcommit(self, context,
                                             l2_gateway_connection_id):
     LOG.info(_LI("ODL: Delete L2Gateway connection %(l2gwconnid)s"),
              {'l2gwconnid': l2_gateway_connection_id})
     url = L2GATEWAY_CONNECTIONS + '/' + l2_gateway_connection_id
     try:
         self.client.try_delete(url)
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.exception(_LE("ODL: L2Gateway connection delete"
                               " failed for connection %(l2gwconnid)s"),
                           {'l2gwconnid': l2_gateway_connection_id})
Example no. 16
def journal_recovery(session):
    for row in db.get_all_db_rows_by_state(session, odl_const.FAILED):
        try:
            LOG.debug("Attempting recovery of journal entry %s.", row)
            odl_resource = _CLIENT.get_client().get_resource(row.object_type,
                                                             row.object_uuid)
            if odl_resource is not None:
                _handle_existing_resource(session, row)
            else:
                _handle_non_existing_resource(session, row)
        except Exception:
            LOG.exception(
                _LE("Failure while recovering journal entry %s."), row)
Example no. 17
    def _execute_op(self, operation, session):
        op_details = operation.__name__
        if operation.__doc__:
            op_details += " (%s)" % operation.func_doc

        try:
            LOG.info(_LI("Starting maintenance operation %s."), op_details)
            db.update_maintenance_operation(session, operation=operation)
            operation(session=session)
            LOG.info(_LI("Finished maintenance operation %s."), op_details)
        except Exception:
            LOG.exception(_LE("Failed during maintenance operation %s."),
                          op_details)
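
A sketch of how a maintenance operation plugs into _execute_op above; the callable's docstring ends up in the log line. The cleanup function and the worker instance are hypothetical (journal_recovery from Example no. 16 is one real candidate):

    def cleanup_completed_rows(session):
        """Delete journal rows that have already been synced."""

    # Logs: "Starting maintenance operation cleanup_completed_rows
    # (Delete journal rows that have already been synced.)."
    worker._execute_op(cleanup_completed_rows, session)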
Example no. 19
 def sendjson(self, method, urlpath, obj):
     """Send json to the OpenDaylight controller."""
     data = jsonutils.dumps(obj, indent=2) if obj else None
     try:
         return self._check_rensponse(
             self.request(method, urlpath, data))
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.error(_LE("REST request ( %(method)s ) to "
                           "url ( %(urlpath)s ) is failed."
                           "Request body : [%(body)s] service"),
                       {'method': method,
                        'urlpath': urlpath,
                        'body': obj})
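
The excutils.save_and_reraise_exception context manager used here (and in several examples above) lets you log inside an except block without swallowing the original exception or its traceback. Minimal standalone usage, where the do_request callable is a hypothetical transport function passed in by the caller:

    import logging

    from oslo_utils import excutils

    LOG = logging.getLogger(__name__)

    def send(do_request, request):
        try:
            return do_request(request)
        except Exception:
            with excutils.save_and_reraise_exception():
                # runs before the original exception is re-raised
                # with its traceback intact
                LOG.error("request failed: %s", request)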
Example no. 20
 def create_l2_gateway_connection_postcommit(self, context,
                                             l2_gateway_connection):
     LOG.info(_LI("ODL: Create L2Gateway connection %(l2gwconn)s"),
              {'l2gwconn': l2_gateway_connection})
     odl_l2_gateway_connection = copy.deepcopy(l2_gateway_connection)
     odl_l2_gateway_connection['gateway_id'] = (
         l2_gateway_connection['l2_gateway_id'])
     odl_l2_gateway_connection.pop('l2_gateway_id')
     request = {'l2gateway_connection': odl_l2_gateway_connection}
     try:
         self.client.sendjson('post', L2GATEWAY_CONNECTIONS, request)
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.exception(_LE("ODL: L2Gateway connection create"
                               " failed for gateway %(l2gwconnid)s"),
                           {'l2gwconnid':
                            l2_gateway_connection['l2_gateway_id']})
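
The deepcopy-and-rename step above maps Neutron's l2_gateway_id field to the gateway_id name ODL expects. A worked example (field values are illustrative):

    l2_gateway_connection = {
        'id': 'conn-uuid',
        'l2_gateway_id': 'gw-uuid',
        'network_id': 'net-uuid',
    }
    # ... becomes the following request body:
    request = {'l2gateway_connection': {
        'id': 'conn-uuid',
        'gateway_id': 'gw-uuid',
        'network_id': 'net-uuid',
    }}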
Example no. 21
    def run_sync_thread(self, exit_after_run=False):
        while True:
            try:
                self.event.wait()
                self.event.clear()

                session = neutron_db_api.get_session()
                self._sync_pending_rows(session, exit_after_run)

                LOG.debug("Clearing sync thread event")
                if exit_after_run:
                    # Permanently waiting thread model breaks unit tests
                    # Adding this arg to exit here only for unit tests
                    break
            except Exception:
                # Catch exceptions to protect the thread while running
                LOG.exception(_LE("Error on run_sync_thread"))
Example no. 22
 def bind_port(self, port_context):
     controller_details = {'name': self.name, 'controller': self.controller}
     try:
         self.controller.bind_port(port_context)
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.exception(
                 _LE("Controller '%(name)s (%(controller)r)' had an error "
                     "when binding port."), controller_details)
     else:
         if port_context._new_bound_segment:
             LOG.info(
                 _LI("Controller '%(name)s (%(controller)r)' has bound "
                     "port."), controller_details)
         else:
             LOG.debug(
                 "Controller %(name)s (%(controller)r) hasn't bound "
                 "port.", controller_details)
Example no. 24
    def _fetch_and_parse_network_topology(self, addresses):
        # The cache calls this method to fetch new elements when at least
        # one of the addresses is not in the cache or has expired.

        # pylint: disable=unused-argument
        LOG.info(_LI('Fetch network topology from ODL.'))
        response = self._client.get()
        response.raise_for_status()

        network_topology = response.json()
        if LOG.isEnabledFor(logging.DEBUG):
            topology_str = jsonutils.dumps(network_topology,
                                           sort_keys=True,
                                           indent=4,
                                           separators=(',', ': '))
            LOG.debug("Got network topology:\n%s", topology_str)

        at_least_one_element_for_asked_addresses = False
        for parser in self._parsers:
            try:
                for element in parser.parse_network_topology(network_topology):
                    if not isinstance(element, NetworkTopologyElement):
                        raise TypeError(
                            _("Yield element doesn't implement interface "
                              "'NetworkTopologyElement': {!r}").format(
                                  element))
                    # the same element can be known by multiple host addresses
                    for host_address in element.host_addresses:
                        if host_address in addresses:
                            at_least_one_element_for_asked_addresses = True
                        yield host_address, element
            except Exception:
                LOG.exception(
                    _LE("Parser %(parser)r failed to parse network topology."),
                    {'parser': parser})

        if not at_least_one_element_for_asked_addresses:
            # this will mark entries for the given addresses as failed, to
            # allow calling this method again as soon as it is requested
            # instead of waiting for cache expiration
            raise ValueError(
                _('No such topology element for given host addresses: {}').
                format(', '.join(addresses)))
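
A sketch of what a conforming parser might look like. The element subclass and the topology JSON layout below are assumptions for illustration; the real ovsdb parser differs:

    class SketchElement(NetworkTopologyElement):
        def __init__(self, host_addresses, valid_vif_types):
            self.host_addresses = host_addresses
            self.valid_vif_types = valid_vif_types

    class SketchParser(NetworkTopologyParser):
        def parse_network_topology(self, network_topology):
            # hypothetical JSON layout
            for node in network_topology.get('nodes', []):
                yield SketchElement(node.get('addresses', []),
                                    valid_vif_types=['ovs'])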
Example no. 26
    def sync_from_callback(self, context, operation, res_type, res_id,
                           resource_dict, **kwargs):
        object_type = odl_utils.neutronify(res_type.plural)
        try:
            if operation == odl_const.ODL_DELETE:
                self.out_of_sync |= not self.client.try_delete(object_type +
                                                               '/' + res_id)
            else:
                if operation == odl_const.ODL_CREATE:
                    urlpath = object_type
                    method = 'post'
                elif operation == odl_const.ODL_UPDATE:
                    urlpath = object_type + '/' + res_id
                    method = 'put'
                self.client.sendjson(method, urlpath, resource_dict)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Unable to perform %(operation)s on "
                        "%(object_type)s %(res_id)s "
                        "%(resource_dict)s"), {
                            'operation': operation,
                            'object_type': object_type,
                            'res_id': res_id,
                            'resource_dict': resource_dict
                        })
                self.out_of_sync = True

        # NOTE(yamahata): when a security group is created, default rules
        # are also created.
        if (operation == odl_const.ODL_CREATE
                and res_type.singular == odl_const.ODL_SG):
            for rule in resource_dict[odl_const.ODL_SG][
                    odl_const.ODL_SG_RULES]:
                self.sync_from_callback(
                    context, odl_const.ODL_CREATE,
                    odl_call._RESOURCE_MAPPING[resources.SECURITY_GROUP_RULE],
                    rule['id'], {odl_const.ODL_SG_RULE: rule})
Example no. 27
    def sync_single_resource(self, operation, object_type, context):
        """Sync over a single resource from Neutron to OpenDaylight.

        Handle syncing a single operation over to OpenDaylight, and correctly
        filter attributes out which are not required for the requisite
        operation (create or update) being handled.
        """
        # Convert underscores to dashes in the URL for ODL
        object_type_url = odl_utils.neutronify(object_type)
        try:
            obj_id = context.current['id']
            if operation == odl_const.ODL_DELETE:
                self.out_of_sync |= not self.client.try_delete(
                    object_type_url + '/' + obj_id)
            else:
                filter_cls = self.FILTER_MAP[object_type]
                if operation == odl_const.ODL_CREATE:
                    urlpath = object_type_url
                    method = 'post'
                    attr_filter = filter_cls.filter_create_attributes
                elif operation == odl_const.ODL_UPDATE:
                    urlpath = object_type_url + '/' + obj_id
                    method = 'put'
                    attr_filter = filter_cls.filter_update_attributes
                resource = copy.deepcopy(context.current)
                attr_filter(resource, context)
                self.client.sendjson(method, urlpath,
                                     {object_type_url[:-1]: resource})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Unable to perform %(operation)s on "
                        "%(object_type)s %(object_id)s"), {
                            'operation': operation,
                            'object_type': object_type,
                            'object_id': obj_id
                        })
                self.out_of_sync = True
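
FILTER_MAP above pairs each object type with a filter class exposing one hook per operation. A minimal sketch of such a class; the class and the field names are illustrative, not the real networking-odl filters:

    class NetworkFilter(object):
        @staticmethod
        def filter_create_attributes(network, context):
            # strip server-generated fields ODL should not get on create
            network.pop('status', None)

        @staticmethod
        def filter_update_attributes(network, context):
            # the id is already in the PUT URL; drop read-only fields
            for field in ('id', 'status', 'tenant_id'):
                network.pop(field, None)

    FILTER_MAP = {'networks': NetworkFilter}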
Example no. 28
    def _update_agents_db(self, hostconfigs):
        LOG.debug("ODLPORTBINDING Updating agents DB with ODL hostconfigs")

        agents_db = self._get_neutron_db_plugin()

        if not agents_db:  # if ML2 is still initializing
            LOG.warning(
                _LW("ML2 still initializing, Will retry agentdb"
                    " update on next poll"))
            return  # Retry on next poll

        for host_config in hostconfigs:
            try:
                self.agentdb_row['host'] = host_config['host-id']
                self.agentdb_row['agent_type'] = host_config['host-type']
                self.agentdb_row['configurations'] = jsonutils.loads(
                    host_config['config'])

                agents_db.create_or_update_agent(context.get_admin_context(),
                                                 self.agentdb_row)
            except Exception:
                LOG.exception(_LE("Unable to update agentdb."))
                continue  # try next hostconfig
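
Each hostconfig consumed above carries the agent identity plus a JSON-encoded configuration blob, roughly as below (values are illustrative):

    host_config = {
        'host-id': 'compute-1.example.org',  # becomes the agent's 'host'
        'host-type': 'ODL L2',               # becomes 'agent_type'
        'config': '{"supported_vnic_types": []}',  # JSON string, parsed here
    }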
Example no. 29
 def sync_from_callback(self, operation, object_type, res_id,
                        resource_dict):
     try:
         if operation == odl_const.ODL_DELETE:
             self.out_of_sync |= not self.client.try_delete(
                 object_type + '/' + res_id)
         else:
             if operation == odl_const.ODL_CREATE:
                 urlpath = object_type
                 method = 'post'
             elif operation == odl_const.ODL_UPDATE:
                 urlpath = object_type + '/' + res_id
                 method = 'put'
             self.client.sendjson(method, urlpath, resource_dict)
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.error(_LE("Unable to perform %(operation)s on "
                           "%(object_type)s %(res_id)s %(resource_dict)s"),
                       {'operation': operation,
                        'object_type': object_type,
                        'res_id': res_id,
                        'resource_dict': resource_dict})
             self.out_of_sync = True
Example no. 30
    def sync_single_resource(self, operation, object_type, context):
        """Sync over a single resource from Neutron to OpenDaylight.

        Handle syncing a single operation over to OpenDaylight, and correctly
        filter attributes out which are not required for the requisite
        operation (create or update) being handled.
        """
        # Convert underscores to dashes in the URL for ODL
        object_type_url = object_type.replace('_', '-')
        try:
            obj_id = context.current['id']
            if operation == odl_const.ODL_DELETE:
                self.out_of_sync |= not self.client.try_delete(
                    object_type_url + '/' + obj_id)
            else:
                filter_cls = self.FILTER_MAP[object_type]
                if operation == odl_const.ODL_CREATE:
                    urlpath = object_type_url
                    method = 'post'
                    attr_filter = filter_cls.filter_create_attributes
                elif operation == odl_const.ODL_UPDATE:
                    urlpath = object_type_url + '/' + obj_id
                    method = 'put'
                    attr_filter = filter_cls.filter_update_attributes
                resource = copy.deepcopy(context.current)
                attr_filter(resource, context)
                self.client.sendjson(method, urlpath,
                                     {object_type_url[:-1]: resource})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Unable to perform %(operation)s on "
                              "%(object_type)s %(object_id)s"),
                          {'operation': operation,
                           'object_type': object_type,
                           'object_id': obj_id})
                self.out_of_sync = True
Example no. 31
    def sync_from_callback_precommit(self, context, operation, res_type,
                                     res_id, resource_dict, **kwargs):
        object_type = res_type.singular
        if resource_dict is not None:
            resource_dict = resource_dict[object_type]

        if (operation == odl_const.ODL_CREATE
                and object_type == odl_const.ODL_SG):
            self._sync_security_group_create_precommit(context, operation,
                                                       object_type, res_id,
                                                       resource_dict)
            return

        # NOTE(yamahata): in the security group/security group rule case,
        # an ORM object is passed, not a resource dict. So we have to
        # convert it into a resource_dict
        if not isinstance(resource_dict, dict) and resource_dict is not None:
            if object_type == odl_const.ODL_SG:
                resource_dict = self._make_security_group_dict(resource_dict)
            elif object_type == odl_const.ODL_SG_RULE:
                resource_dict = self._make_security_group_rule_dict(
                    resource_dict)
        # NOTE(yamahata): bug workaround
        # callback for update of security group doesn't pass complete
        # info. So we have to build it. Once the bug is fixed, remove
        # this bug workaround.
        # https://launchpad.net/bugs/1546910
        # https://review.openstack.org/#/c/281693/
        elif (object_type == odl_const.ODL_SG
              and operation == odl_const.ODL_UPDATE):
            # NOTE(yamahata): precommit_update is called before updating
            # values, so context.session.{new, dirty} doesn't include the sg
            # in question. A dictionary with new values needs to be built.
            core_plugin = directory.get_plugin()
            sg = core_plugin._get_security_group(context, res_id)
            tmp_dict = self._make_security_group_dict(sg)
            tmp_dict.update(resource_dict)
            resource_dict = tmp_dict

        object_uuid = (resource_dict.get('id')
                       if operation == 'create' else res_id)
        if object_uuid is None:
            # NOTE(yamahata): bug work around bug/1546910
            # TODO(yamahata): once the following patch is merged
            # remove this bug work around
            # https://review.openstack.org/#/c/281693/
            assert object_type == odl_const.ODL_SG_RULE
            # NOTE(yamahata): bulk creation case
            # context.session.new accumulates all newly created orm objects.
            # there is no easy way to pick out the last added orm object.
            rules = [
                rule for rule in context.session.new
                if (isinstance(rule, securitygroup.SecurityGroupRule))
            ]
            if len(rules) == 1:
                object_uuid = rules[0].id
                resource_dict['id'] = object_uuid
            else:
                LOG.error(_LE("bulk creation of sgrule isn't supported"))
                raise NotImplementedError(
                    _("unsupporetd bulk creation of security group rule"))
        journal.record(context, object_type, object_uuid, operation,
                       resource_dict)
        # NOTE(yamahata): DB auto deletion
        # Security Group Rule under this Security Group needs to
        # be deleted. At NeutronDB layer rules are auto deleted with
        # cascade='all,delete'.
        if (object_type == odl_const.ODL_SG
                and operation == odl_const.ODL_DELETE):
            for rule in kwargs['security_group'].rules:
                journal.record(context, odl_const.ODL_SG_RULE, rule.id,
                               odl_const.ODL_DELETE, [object_uuid])
Example no. 32
    def bind_port(self, port_context):
        """Set binding for a valid segment

        """
        host_name = port_context.host
        elements = list()
        try:
            # Append to an empty list so as many elements as possible are
            # kept in case an exception is raised
            elements.extend(self._fetch_elements_by_host(host_name))
        except Exception:
            LOG.exception(
                _LE('Error fetching elements for host %(host_name)r.'),
                {'host_name': host_name}, exc_info=1)

        if not elements:
            # If no network topology element was found for the given host,
            # fall back to the legacy OVS element to keep the old behaviour
            LOG.warning(
                _LW('Using legacy OVS network topology element for port '
                    'binding for host: %(host_name)r.'),
                {'host_name': host_name})

            # Imported here to avoid cyclic module dependencies
            from networking_odl.ml2 import ovsdb_topology
            elements = [ovsdb_topology.OvsdbNetworkTopologyElement()]

        # TODO(Federico Ressi): when there are multiple candidate virtual
        # switch instances for the same host, one has to be chosen for
        # binding the port. As there isn't any known way to perform this
        # selection, it picks a VIF type that is valid for all switches
        # that have been found. This has to be improved.
        for vif_type in self.valid_vif_types:
            vif_type_is_valid_for_all = True
            for element in elements:
                if vif_type not in element.valid_vif_types:
                    # it is invalid for at least one element: discard it
                    vif_type_is_valid_for_all = False
                    break

            if vif_type_is_valid_for_all:
                # This is the best VIF type valid for all elements
                LOG.debug(
                    "Found VIF type %(vif_type)r valid for all network "
                    "topology elements for host %(host_name)r.",
                    {'vif_type': vif_type, 'host_name': host_name})

                for element in elements:
                    # It assumes that any element could be good for the given
                    # host. In most cases I expect exactly one element for
                    # every compute host
                    try:
                        return element.bind_port(
                            port_context, vif_type, self._vif_details)

                    except Exception:
                        LOG.exception(
                            _LE('Network topology element has failed binding '
                                'port:\n%(element)s'),
                            {'element': element.to_json()})

        LOG.error(
            _LE('Unable to bind port element for given host and valid VIF '
                'types:\n'
                '\thostname: %(host_name)s\n'
                '\tvalid VIF types: %(valid_vif_types)s'),
            {'host_name': host_name,
             'valid_vif_types': ', '.join(self.valid_vif_types)})
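
The nested loops above pick the first VIF type that every discovered element supports. The selection logic in isolation, equivalent to the code's for/else dance:

    def pick_vif_type(valid_vif_types, elements):
        """Return the first VIF type every element supports, else None."""
        for vif_type in valid_vif_types:
            if all(vif_type in e.valid_vif_types for e in elements):
                return vif_type
        return None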
Example no. 33
    def sync_pending_row(self, exit_after_run=False):
        # Block until all pending rows are processed
        session = neutron_db_api.get_session()
        while not self.event.is_set():
            self.event.wait()
            # Clear the event and go back to waiting after
            # the sync block exits
            self.event.clear()
            while True:
                LOG.debug("Thread walking database")
                row = db.get_oldest_pending_db_row_with_lock(session)
                if not row:
                    LOG.debug("No rows to sync")
                    break

                # Validate the operation
                validate_func = (
                    dependency_validations.VALIDATION_MAP[row.object_type])
                valid = validate_func(session, row)
                if not valid:
                    LOG.info(
                        _LI("%(operation)s %(type)s %(uuid)s is not a "
                            "valid operation yet, skipping for now"), {
                                'operation': row.operation,
                                'type': row.object_type,
                                'uuid': row.object_uuid
                            })

                    # Set row back to pending.
                    db.update_db_row_state(session, row, odl_const.PENDING)
                    if exit_after_run:
                        break
                    continue

                LOG.info(
                    _LI("Syncing %(operation)s %(type)s %(uuid)s"), {
                        'operation': row.operation,
                        'type': row.object_type,
                        'uuid': row.object_uuid
                    })

                # Add code to sync this to ODL
                method, urlpath, to_send = self._json_data(row)

                try:
                    self.client.sendjson(method, urlpath, to_send)
                    db.update_db_row_state(session, row, odl_const.COMPLETED)
                except exceptions.ConnectionError:
                    # Don't increment the retry count, just log an error
                    LOG.error(
                        _LE("Cannot connect to the Opendaylight "
                            "Controller"))
                    # Set row back to pending
                    db.update_db_row_state(session, row, odl_const.PENDING)
                    # Break out of the loop and retry with the next
                    # timer interval
                    break
                except Exception as e:
                    LOG.error(
                        _LE("Error syncing %(type)s %(operation)s,"
                            " id %(uuid)s Error: %(error)s"), {
                                'type': row.object_type,
                                'uuid': row.object_uuid,
                                'operation': row.operation,
                                'error': str(e)
                            })
                    db.update_pending_db_row_retry(session, row,
                                                   self._row_retry_count)
            LOG.debug("Clearing sync thread event")
            if exit_after_run:
                # Permanently waiting thread model breaks unit tests
                # Adding this arg to exit here only for unit tests
                break