def sync_from_callback(self, operation, object_type, res_id,
                       resource_dict):
    try:
        if operation == odl_const.ODL_DELETE:
            self.out_of_sync |= not self.client.try_delete(
                object_type + '/' + res_id)
        else:
            if operation == odl_const.ODL_CREATE:
                urlpath = object_type
                method = 'post'
            elif operation == odl_const.ODL_UPDATE:
                urlpath = object_type + '/' + res_id
                method = 'put'
            self.client.sendjson(method, urlpath, resource_dict)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Unable to perform %(operation)s on "
                          "%(object_type)s %(res_id)s %(resource_dict)s"),
                      {'operation': operation, 'object_type': object_type,
                       'res_id': res_id, 'resource_dict': resource_dict})
            self.out_of_sync = True
def _create_parsers(self):
    for parser_name in self.network_topology_parsers:
        try:
            yield NetworkTopologyParser.create_parser(parser_name)
        except Exception:
            LOG.exception(
                _LE('Error initializing topology parser: %(parser_name)r'),
                {'parser_name': parser_name})
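# Illustrative sketch (hypothetical class, not part of the tree): the
# contract implied by _create_parsers() above and by
# _fetch_and_parse_network_topology() below is that every parser returned
# by NetworkTopologyParser.create_parser(name) exposes a
# parse_network_topology() method yielding NetworkTopologyElement
# instances. A duck-typed stand-in would look roughly like this:
class NoopTopologyParser(object):

    def parse_network_topology(self, network_topology):
        # A real parser would walk the topology document and yield one
        # NetworkTopologyElement per switch it recognizes; this stand-in
        # yields nothing.
        return iter([])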
def _fetch_elements_by_host(self, host_name, cache_timeout=60.0):
    '''Yields all network topology elements referring to given host name.'''
    host_addresses = [host_name]
    try:
        # Use both the compute host name and its known IP addresses to
        # recognize topology elements valid for the given compute host
        ip_addresses = utils.get_addresses_by_name(host_name)
    except Exception:
        ip_addresses = []
        LOG.exception(
            _LE('Unable to resolve IP addresses for host %(host_name)r'),
            {'host_name': host_name})
    else:
        host_addresses.extend(ip_addresses)

    yield_elements = set()
    try:
        for _, element in self._elements_by_ip.fetch_all(
                host_addresses, cache_timeout):
            # yield every element only once
            if element not in yield_elements:
                yield_elements.add(element)
                yield element
    except cache.CacheFetchError as error:
        # This error is expected in most cases because typically not all
        # host addresses map to a network topology element.
        if yield_elements:
            # Only one element is needed for every host, so ignore the
            # case in which the other host addresses didn't map to any
            # element
            LOG.debug(
                'Host addresses not found in networking topology: %s',
                ', '.join(error.missing_keys))
        else:
            LOG.exception(
                _LE('No such network topology elements for given host '
                    '%(host_name)r and given IPs: %(ip_addresses)s.'),
                {'host_name': host_name,
                 'ip_addresses': ", ".join(ip_addresses)})
            error.reraise_cause()
def _fetch_and_parse_network_topology(self, addresses):
    # The cache calls this method to fetch new elements when at least one
    # of the addresses is not in the cache or it has expired.

    # pylint: disable=unused-argument
    LOG.info(_LI('Fetch network topology from ODL.'))
    response = self._client.get()
    response.raise_for_status()

    network_topology = response.json()
    if LOG.isEnabledFor(logging.DEBUG):
        topology_str = jsonutils.dumps(
            network_topology, sort_keys=True, indent=4,
            separators=(',', ': '))
        LOG.debug("Got network topology:\n%s", topology_str)

    at_least_one_element_for_asked_addresses = False
    for parser in self._parsers:
        try:
            for element in parser.parse_network_topology(network_topology):
                if not isinstance(element, NetworkTopologyElement):
                    raise TypeError(
                        "Yielded element doesn't implement interface "
                        "'NetworkTopologyElement': {!r}".format(element))
                # the same element can be known by more than one host
                # address
                for host_address in element.host_addresses:
                    if host_address in addresses:
                        at_least_one_element_for_asked_addresses = True
                        yield host_address, element
        except Exception:
            LOG.exception(
                _LE("Parser %(parser)r failed to parse network topology."),
                {'parser': parser})

    if not at_least_one_element_for_asked_addresses:
        # Mark the entries for the given addresses as failed so that this
        # method is called again as soon as they are requested, instead of
        # waiting for cache expiration
        raise ValueError(
            'No such topology element for given host addresses: {}'.format(
                ', '.join(addresses)))
def sync_single_resource(self, operation, object_type, context):
    """Sync over a single resource from Neutron to OpenDaylight.

    Handle syncing a single operation over to OpenDaylight, and correctly
    filter out attributes that are not required for the requisite
    operation (create or update) being handled.
    """
    # Convert underscores to dashes in the URL for ODL
    object_type_url = object_type.replace('_', '-')
    try:
        obj_id = context.current['id']
        if operation == odl_const.ODL_DELETE:
            self.out_of_sync |= not self.client.try_delete(
                object_type_url + '/' + obj_id)
        else:
            filter_cls = self.FILTER_MAP[object_type]
            if operation == odl_const.ODL_CREATE:
                urlpath = object_type_url
                method = 'post'
                attr_filter = filter_cls.filter_create_attributes
            elif operation == odl_const.ODL_UPDATE:
                urlpath = object_type_url + '/' + obj_id
                method = 'put'
                attr_filter = filter_cls.filter_update_attributes
            resource = copy.deepcopy(context.current)
            attr_filter(resource, context)
            self.client.sendjson(method, urlpath,
                                 {object_type_url[:-1]: resource})
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Unable to perform %(operation)s on "
                          "%(object_type)s %(object_id)s"),
                      {'operation': operation,
                       'object_type': object_type,
                       'object_id': obj_id})
            self.out_of_sync = True
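# Illustrative sketch (hypothetical values): for object_type 'networks' the
# URL path keeps the plural form while the JSON payload key is the singular
# name obtained by stripping the trailing 's', so sync_single_resource()
# above ends up issuing roughly:
#
#   create: self.client.sendjson('post', 'networks', {'network': resource})
#   update: self.client.sendjson('put', 'networks/' + obj_id,
#                                {'network': resource})
#   delete: self.client.try_delete('networks/' + obj_id)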
def bind_port(self, port_context):
    """Set binding for a valid segment."""
    host_name = port_context.host
    elements = list()
    try:
        # Extend an empty list so that as many elements as possible are
        # collected even if fetching raises an exception half way through
        elements.extend(self._fetch_elements_by_host(host_name))
    except Exception:
        LOG.exception(
            _LE('Error fetching elements for host %(host_name)r.'),
            {'host_name': host_name}, exc_info=1)

    if not elements:
        # No network topology element was found for the given host, so
        # fall back to the legacy OVS element to keep the old behaviour
        LOG.warning(
            _LW('Using legacy OVS network topology element for port '
                'binding for host: %(host_name)r.'),
            {'host_name': host_name})

        # Imported here to avoid cyclic module dependencies
        from networking_odl.ml2 import ovsdb_topology
        elements = [ovsdb_topology.OvsdbNetworkTopologyElement()]

    # TODO(Federico Ressi): when there is more than one candidate virtual
    # switch instance for the same host, one of them has to be chosen for
    # binding the port. As there isn't any known way to perform this
    # selection, a VIF type that is valid for all the switches found is
    # selected. This has to be improved.
    for vif_type in self.valid_vif_types:
        vif_type_is_valid_for_all = True
        for element in elements:
            if vif_type not in element.valid_vif_types:
                # it is invalid for at least one element: discard it
                vif_type_is_valid_for_all = False
                break

        if vif_type_is_valid_for_all:
            # This is the best VIF type valid for all elements
            LOG.debug(
                "Found VIF type %(vif_type)r valid for all network "
                "topology elements for host %(host_name)r.",
                {'vif_type': vif_type, 'host_name': host_name})

            for element in elements:
                # Any element could be good for the given host; in most
                # cases exactly one element is expected for every compute
                # host
                try:
                    return element.bind_port(
                        port_context, vif_type, self._vif_details)
                except Exception:
                    LOG.exception(
                        _LE('Network topology element has failed binding '
                            'port:\n%(element)s'),
                        {'element': element.to_json()})

    LOG.error(
        _LE('Unable to bind port element for given host and valid VIF '
            'types:\n'
            '\thostname: %(host_name)s\n'
            '\tvalid VIF types: %(valid_vif_types)s'),
        {'host_name': host_name,
         'valid_vif_types': ', '.join(self.valid_vif_types)})
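# Illustrative sketch (hypothetical helper, not part of the driver): the VIF
# type selection performed by bind_port() above is roughly equivalent to
# intersecting the valid VIF types of every element and picking the first
# match in the preference order of self.valid_vif_types.
def _select_common_vif_type(preferred_vif_types, elements):
    common = set(preferred_vif_types)
    for element in elements:
        common &= set(element.valid_vif_types)
    # preserve the preference order of the caller's list
    for vif_type in preferred_vif_types:
        if vif_type in common:
            return vif_type
    return None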
def sync_pending_row(self, exit_after_run=False):
    # Block until all pending rows are processed
    session = neutron_db_api.get_session()
    while not self.event.is_set():
        self.event.wait()
        # Clear the event and go back to waiting after
        # the sync block exits
        self.event.clear()
        while True:
            LOG.debug("Thread walking database")
            row = db.get_oldest_pending_db_row_with_lock(session)
            if not row:
                LOG.debug("No rows to sync")
                break

            # Validate the operation
            validate_func = db.VALIDATION_MAP[row.object_type]
            valid = validate_func(session, row.object_uuid,
                                  row.operation, row.data)
            if not valid:
                LOG.info(_LI("%(operation)s %(type)s %(uuid)s is not a "
                             "valid operation yet, skipping for now"),
                         {'operation': row.operation,
                          'type': row.object_type,
                          'uuid': row.object_uuid})
                # Set row back to pending
                db.update_db_row_pending(session, row)
                if exit_after_run:
                    break
                continue

            LOG.info(_LI("Syncing %(operation)s %(type)s %(uuid)s"),
                     {'operation': row.operation,
                      'type': row.object_type,
                      'uuid': row.object_uuid})

            # Sync the row over to ODL
            method, urlpath, to_send = self._json_data(row)

            try:
                self.client.sendjson(method, urlpath, to_send)
                db.update_processing_db_row_passed(session, row)
            except exceptions.ConnectionError:
                # Don't increment the retry count, just log an error
                LOG.error(_LE("Cannot connect to the Opendaylight "
                              "Controller"))
                # Set row back to pending
                db.update_db_row_pending(session, row)
                # Break out of the loop and retry with the next
                # timer interval
                break
            except Exception as e:
                LOG.error(_LE("Error syncing %(type)s %(operation)s,"
                              " id %(uuid)s Error: %(error)s"),
                          {'type': row.object_type,
                           'uuid': row.object_uuid,
                           'operation': row.operation,
                           'error': str(e)})
                db.update_pending_db_row_retry(session, row,
                                               self._row_retry_count)
        LOG.debug("Clearing sync thread event")
        if exit_after_run:
            # A permanently waiting thread model breaks unit tests;
            # this arg exists only so unit tests can exit here
            break
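# Illustrative sketch (hypothetical method name): sync_pending_row() above
# blocks on self.event, so another code path has to wake the thread by
# setting that same threading.Event whenever new journal rows are written,
# roughly:
def set_sync_event(self):
    self.event.set()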