def _sync_pending_rows(self, session, exit_after_run):
    """Drain the journal: push pending rows to ODL one at a time.

    Locks the oldest pending row, checks its dependency validations,
    sends it to the controller and marks it COMPLETED, PENDING (on
    connection failure) or retried (on any other error).

    :param session: DB session used for all journal table access.
    :param exit_after_run: when True, stop after the first skipped row
        (used by unit tests).
    """
    while True:
        LOG.debug("Thread walking database")
        row = db.get_oldest_pending_db_row_with_lock(session)
        if not row:
            LOG.debug("No rows to sync")
            break

        # Validate the operation
        validate_func = (
            dependency_validations.VALIDATION_MAP[row.object_type])
        valid = validate_func(session, row)
        if not valid:
            LOG.info(_LI("%(operation)s %(type)s %(uuid)s is not a "
                         "valid operation yet, skipping for now"),
                     {'operation': row.operation,
                      'type': row.object_type,
                      'uuid': row.object_uuid})
            # Set row back to pending.
            db.update_db_row_state(session, row, odl_const.PENDING)
            if exit_after_run:
                break
            continue

        LOG.info(_LI("Syncing %(operation)s %(type)s %(uuid)s"),
                 {'operation': row.operation,
                  'type': row.object_type,
                  'uuid': row.object_uuid})

        # Add code to sync this to ODL
        method, urlpath, to_send = self._json_data(row)

        try:
            self.client.sendjson(method, urlpath, to_send)
            db.update_db_row_state(session, row, odl_const.COMPLETED)
        except exceptions.ConnectionError:
            # Don't raise the retry count, just log an error
            LOG.error(_LE("Cannot connect to the Opendaylight Controller"))
            # Set row back to pending
            db.update_db_row_state(session, row, odl_const.PENDING)
            # Break out of the loop and retry with the next
            # timer interval
            break
        except Exception as e:
            # str(e) rather than e.message: BaseException.message was
            # deprecated by PEP 352 and removed in Python 3.
            LOG.error(_LE("Error syncing %(type)s %(operation)s,"
                          " id %(uuid)s Error: %(error)s"),
                      {'type': row.object_type,
                       'uuid': row.object_uuid,
                       'operation': row.operation,
                       'error': str(e)})
            db.update_pending_db_row_retry(session, row,
                                           self._row_retry_count)
def _sync_pending_rows(self, session, exit_after_run):
    """Process every pending journal row and sync it to ODL.

    Each iteration locks the oldest pending row, validates it against
    its dependencies, and either sends it to the controller or defers
    it back to PENDING.

    :param session: DB session for journal access.
    :param exit_after_run: when True, exit after the first deferred row
        (unit-test hook).
    """
    while True:
        LOG.debug("Thread walking database")
        row = db.get_oldest_pending_db_row_with_lock(session)
        if not row:
            LOG.debug("No rows to sync")
            break

        # Validate the operation
        validate_func = (dependency_validations.
                         VALIDATION_MAP[row.object_type])
        valid = validate_func(session, row)
        if not valid:
            LOG.info(_LI("%(operation)s %(type)s %(uuid)s is not a "
                         "valid operation yet, skipping for now"),
                     {'operation': row.operation,
                      'type': row.object_type,
                      'uuid': row.object_uuid})
            # Set row back to pending.
            db.update_db_row_state(session, row, odl_const.PENDING)
            if exit_after_run:
                break
            continue

        LOG.info(_LI("Syncing %(operation)s %(type)s %(uuid)s"),
                 {'operation': row.operation, 'type': row.object_type,
                  'uuid': row.object_uuid})

        # Add code to sync this to ODL
        method, urlpath, to_send = self._json_data(row)

        try:
            self.client.sendjson(method, urlpath, to_send)
            db.update_db_row_state(session, row, odl_const.COMPLETED)
        except exceptions.ConnectionError:
            # Don't raise the retry count, just log an error
            LOG.error(_LE("Cannot connect to the Opendaylight Controller"))
            # Set row back to pending
            db.update_db_row_state(session, row, odl_const.PENDING)
            # Break out of the loop and retry with the next
            # timer interval
            break
        except Exception as e:
            # str(e) rather than e.message: BaseException.message was
            # removed in Python 3 (PEP 352).
            LOG.error(_LE("Error syncing %(type)s %(operation)s,"
                          " id %(uuid)s Error: %(error)s"),
                      {'type': row.object_type,
                       'uuid': row.object_uuid,
                       'operation': row.operation,
                       'error': str(e)})
            db.update_pending_db_row_retry(session, row,
                                           self._row_retry_count)
def _execute_op(self, operation, session):
    """Run one maintenance operation, recording it and logging failures.

    :param operation: callable taking ``session=``; its name/docstring
        are used only for log messages.
    :param session: DB session passed through to the operation.
    """
    op_details = operation.__name__
    if operation.__doc__:
        # Use __doc__ directly: ``func_doc`` was the Python 2 alias and
        # no longer exists on Python 3 functions.
        op_details += " (%s)" % operation.__doc__
    try:
        LOG.info(_LI("Starting maintenance operation %s."), op_details)
        db.update_maintenance_operation(session, operation=operation)
        operation(session=session)
        LOG.info(_LI("Finished maintenance operation %s."), op_details)
    except Exception:
        # Maintenance failures are logged but never propagated, so one
        # broken operation cannot abort the whole maintenance run.
        LOG.exception(_LE("Failed during maintenance operation %s."),
                      op_details)
def execute_ops(self):
    """Run every registered maintenance operation under the global lock."""
    LOG.info(_LI("Starting journal maintenance run."))
    session = neutron_db_api.get_session()
    # Only one maintenance run may be active across all workers.
    if not db.lock_maintenance(session):
        LOG.info(_LI("Maintenance already running, aborting."))
        return
    try:
        for op in self.maintenance_ops:
            self._execute_op(op, session)
    finally:
        # Clear the recorded operation and release the lock even when an
        # individual operation raised.
        db.update_maintenance_operation(session, operation=None)
        db.unlock_maintenance(session)
        LOG.info(_LI("Finished journal maintenance run."))
def execute_ops(self):
    """Execute all maintenance operations while holding the DB lock."""
    LOG.info(_LI("Starting journal maintenance run."))
    session = neutron_db_api.get_writer_session()
    # Bail out early when another worker already holds the lock.
    if not db.lock_maintenance(session):
        LOG.info(_LI("Maintenance already running, aborting."))
        return
    try:
        for op in self.maintenance_ops:
            self._execute_op(op, session)
    finally:
        # Reset state and drop the lock unconditionally so a failing
        # operation never leaves maintenance wedged.
        db.update_maintenance_operation(session, operation=None)
        db.unlock_maintenance(session)
        LOG.info(_LI("Finished journal maintenance run."))
def create_l2_gateway_connection_postcommit(self, context,
                                            l2_gateway_connection):
    """Create an L2 gateway connection on ODL after the DB commit."""
    LOG.info(_LI("ODL: Create L2Gateway connection %(l2gwconn)s"),
             {'l2gwconn': l2_gateway_connection})
    # ODL's API names the field 'gateway_id' where Neutron uses
    # 'l2_gateway_id'; rename it on a deep copy so the caller's dict
    # stays untouched.
    odl_conn = copy.deepcopy(l2_gateway_connection)
    odl_conn['gateway_id'] = odl_conn.pop('l2_gateway_id')
    self.client.sendjson('post', L2GATEWAY_CONNECTIONS,
                         {'l2gateway_connection': odl_conn})
def delete_l2_gateway_postcommit(self, context, l2_gateway_id):
    """Delete an L2 gateway on ODL after the DB commit."""
    LOG.info(_LI("ODL: Delete L2Gateway %(l2gatewayid)s"),
             {'l2gatewayid': l2_gateway_id})
    try:
        self.client.try_delete(L2GATEWAYS + '/' + l2_gateway_id)
    except Exception:
        # Log with traceback, then let the original exception propagate.
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("ODL: L2Gateway delete"
                              " failed for gateway_id %(l2gatewayid)s"),
                          {'l2gatewayid': l2_gateway_id})
def create_l2_gateway_postcommit(self, context, l2_gateway):
    """Create an L2 gateway on ODL after the DB commit."""
    LOG.info(_LI("ODL: Create L2Gateway %(l2gateway)s"),
             {'l2gateway': l2_gateway})
    payload = {'l2_gateway': l2_gateway}
    try:
        self.client.sendjson('post', L2GATEWAYS, payload)
    except Exception:
        # Log with traceback, then re-raise the original exception.
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("ODL: L2Gateway create"
                              " failed for gateway %(l2gatewayid)s"),
                          {'l2gatewayid': l2_gateway['id']})
def _get_and_update_hostconfigs(self, session=None):
    """Fetch host configs from ODL over REST and push them to the agents DB.

    A failed/empty fetch is treated as transient: nothing is updated and
    the next poll retries.
    """
    LOG.info(_LI("REST/GET hostconfigs from ODL"))
    hostconfigs = self._rest_get_hostconfigs()
    if not hostconfigs:
        LOG.warning(_LW("ODL hostconfigs REST/GET failed, "
                        "will retry on next poll"))
        return  # retry on next poll
    self._update_agents_db(hostconfigs=hostconfigs)
def delete_l2_gateway_connection_postcommit(self, context,
                                            l2_gateway_connection_id):
    """Delete an L2 gateway connection on ODL after the DB commit."""
    LOG.info(_LI("ODL: Delete L2Gateway connection %(l2gwconnid)s"),
             {'l2gwconnid': l2_gateway_connection_id})
    try:
        self.client.try_delete(
            L2GATEWAY_CONNECTIONS + '/' + l2_gateway_connection_id)
    except Exception:
        # Log the failure with traceback and propagate it unchanged.
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("ODL: L2Gateway connection delete"
                              " failed for connection %(l2gwconnid)s"),
                          {'l2gwconnid': l2_gateway_connection_id})
def create_l2_gateway_connection_postcommit(self, context,
                                            l2_gateway_connection):
    """Create an L2 gateway connection on ODL after the DB commit."""
    LOG.info(_LI("ODL: Create L2Gateway connection %(l2gwconn)s"),
             {'l2gwconn': l2_gateway_connection})
    # ODL expects 'gateway_id' where Neutron uses 'l2_gateway_id';
    # rename on a deep copy so the caller's dict is not mutated.
    odl_conn = copy.deepcopy(l2_gateway_connection)
    odl_conn['gateway_id'] = odl_conn.pop('l2_gateway_id')
    try:
        self.client.sendjson('post', L2GATEWAY_CONNECTIONS,
                             {'l2gateway_connection': odl_conn})
    except Exception:
        # Log with traceback, then re-raise the original exception.
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("ODL: L2Gateway connection create"
                              " failed for gateway %(l2gwconnid)s"),
                          {'l2gwconnid':
                           l2_gateway_connection['l2_gateway_id']})
def bind_port(self, port_context):
    """Delegate port binding to the wrapped controller and log the outcome."""
    details = {'name': self.name, 'controller': self.controller}
    try:
        self.controller.bind_port(port_context)
    except Exception:
        # Log with traceback and re-raise the controller's exception.
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Controller '%(name)s (%(controller)r)' "
                              "had an error when binding port."), details)
    else:
        if port_context._new_bound_segment:
            LOG.info(_LI("Controller '%(name)s (%(controller)r)' has "
                         "bound port."), details)
        else:
            LOG.debug("Controller %(name)s (%(controller)r) hasn't bound "
                      "port.", details)
def _fetch_and_parse_network_topology(self, addresses):
    """Fetch the ODL network topology and yield (host_address, element) pairs.

    Generator: only elements whose host address appears in ``addresses``
    are yielded.  Raises ValueError when no element matched, so the
    cache marks those addresses as failed rather than waiting for
    expiration.
    """
    # The cache calls this method to fetch new elements when at least one
    # of the addresses is not in the cache or it has expired.
    # pylint: disable=unused-argument
    LOG.info(_LI('Fetch network topology from ODL.'))
    response = self._client.get()
    response.raise_for_status()
    network_topology = response.json()
    # Serializing the whole topology is expensive; only do it when
    # DEBUG logging is actually enabled.
    if LOG.isEnabledFor(logging.DEBUG):
        topology_str = jsonutils.dumps(network_topology, sort_keys=True,
                                       indent=4, separators=(',', ': '))
        LOG.debug("Got network topology:\n%s", topology_str)
    at_least_one_element_for_asked_addresses = False
    for parser in self._parsers:
        try:
            for element in parser.parse_network_topology(network_topology):
                if not isinstance(element, NetworkTopologyElement):
                    raise TypeError(
                        _("Yield element doesn't implement interface "
                          "'NetworkTopologyElement': {!r}").format(
                            element))
                # the same element can be known by more host addresses
                for host_address in element.host_addresses:
                    if host_address in addresses:
                        at_least_one_element_for_asked_addresses = True
                        yield host_address, element
        except Exception:
            # A broken parser must not stop the remaining parsers.
            LOG.exception(
                _LE("Parser %(parser)r failed to parse network topology."),
                {'parser': parser})
    if not at_least_one_element_for_asked_addresses:
        # this will mark entries for given addresses as failed to allow
        # calling this method again as soon it is requested and avoid
        # waiting for cache expiration
        raise ValueError(
            _('No such topology element for given host addresses: {}').
            format(', '.join(addresses)))
def _fetch_and_parse_network_topology(self, addresses):
    """Fetch the ODL network topology and yield (host_address, element) pairs.

    Generator: only elements whose host address appears in ``addresses``
    are yielded.  Raises ValueError when nothing matched so the cache
    marks those addresses as failed instead of waiting for expiration.
    """
    # The cache calls this method to fetch new elements when at least one
    # of the addresses is not in the cache or it has expired.
    # pylint: disable=unused-argument
    LOG.info(_LI('Fetch network topology from ODL.'))
    response = self._client.get()
    response.raise_for_status()
    network_topology = response.json()
    # Serializing the topology is expensive; only do it when DEBUG
    # logging is actually enabled.
    if LOG.isEnabledFor(logging.DEBUG):
        topology_str = jsonutils.dumps(
            network_topology, sort_keys=True, indent=4,
            separators=(',', ': '))
        LOG.debug("Got network topology:\n%s", topology_str)
    at_least_one_element_for_asked_addresses = False
    for parser in self._parsers:
        try:
            for element in parser.parse_network_topology(network_topology):
                if not isinstance(element, NetworkTopologyElement):
                    raise TypeError(
                        "Yield element doesn't implement interface "
                        "'NetworkTopologyElement': {!r}".format(element))
                # the same element can be known by more host addresses
                for host_address in element.host_addresses:
                    if host_address in addresses:
                        at_least_one_element_for_asked_addresses = True
                        yield host_address, element
        except Exception:
            # A broken parser must not stop the remaining parsers.
            LOG.exception(
                _LE("Parser %(parser)r failed to parse network topology."),
                {'parser': parser})
    if not at_least_one_element_for_asked_addresses:
        # this will mark entries for given addresses as failed to allow
        # calling this method again as soon it is requested and avoid
        # waiting for cache expiration
        raise ValueError(
            'No such topology element for given host addresses: {}'.format(
                ', '.join(addresses)))
def sync_pending_row(self, exit_after_run=False):
    """Worker loop: wait on the sync event, then drain pending journal rows.

    :param exit_after_run: when True, run at most one pass and return
        (unit-test hook; the normal thread model waits forever).
    """
    # Block until all pending rows are processed
    session = neutron_db_api.get_session()
    while not self.event.is_set():
        self.event.wait()
        # Clear the event and go back to waiting after
        # the sync block exits
        self.event.clear()
        while True:
            LOG.debug("Thread walking database")
            row = db.get_oldest_pending_db_row_with_lock(session)
            if not row:
                LOG.debug("No rows to sync")
                break

            # Validate the operation
            validate_func = (
                dependency_validations.VALIDATION_MAP[row.object_type])
            valid = validate_func(session, row)
            if not valid:
                LOG.info(_LI("%(operation)s %(type)s %(uuid)s is not a "
                             "valid operation yet, skipping for now"),
                         {'operation': row.operation,
                          'type': row.object_type,
                          'uuid': row.object_uuid})
                # Set row back to pending.
                db.update_db_row_state(session, row, odl_const.PENDING)
                if exit_after_run:
                    break
                continue

            LOG.info(_LI("Syncing %(operation)s %(type)s %(uuid)s"),
                     {'operation': row.operation,
                      'type': row.object_type,
                      'uuid': row.object_uuid})

            # Add code to sync this to ODL
            method, urlpath, to_send = self._json_data(row)

            try:
                self.client.sendjson(method, urlpath, to_send)
                db.update_db_row_state(session, row, odl_const.COMPLETED)
            except exceptions.ConnectionError:
                # Don't raise the retry count, just log an error
                LOG.error(_LE("Cannot connect to the Opendaylight "
                              "Controller"))
                # Set row back to pending
                db.update_db_row_state(session, row, odl_const.PENDING)
                # Break out of the loop and retry with the next
                # timer interval
                break
            except Exception as e:
                # str(e) rather than e.message: BaseException.message was
                # removed in Python 3 (PEP 352).
                LOG.error(_LE("Error syncing %(type)s %(operation)s,"
                              " id %(uuid)s Error: %(error)s"),
                          {'type': row.object_type,
                           'uuid': row.object_uuid,
                           'operation': row.operation,
                           'error': str(e)})
                db.update_pending_db_row_retry(session, row,
                                               self._row_retry_count)
        LOG.debug("Clearing sync thread event")
        if exit_after_run:
            # Permanently waiting thread model breaks unit tests
            # Adding this arg to exit here only for unit tests
            break
def sync_pending_row(self, exit_after_run=False):
    """Event-driven worker: drain pending journal rows to ODL.

    :param exit_after_run: when True, run at most one pass and return
        (unit-test hook; the normal thread model waits forever).
    """
    # Block until all pending rows are processed
    session = neutron_db_api.get_session()
    while not self.event.is_set():
        self.event.wait()
        # Clear the event and go back to waiting after
        # the sync block exits
        self.event.clear()
        while True:
            LOG.debug("Thread walking database")
            row = db.get_oldest_pending_db_row_with_lock(session)
            if not row:
                LOG.debug("No rows to sync")
                break

            # Validate the operation
            validate_func = (dependency_validations.
                             VALIDATION_MAP[row.object_type])
            valid = validate_func(session, row)
            if not valid:
                LOG.info(_LI("%(operation)s %(type)s %(uuid)s is not a "
                             "valid operation yet, skipping for now"),
                         {'operation': row.operation,
                          'type': row.object_type,
                          'uuid': row.object_uuid})
                # Set row back to pending.
                db.update_db_row_state(session, row, odl_const.PENDING)
                if exit_after_run:
                    break
                continue

            LOG.info(_LI("Syncing %(operation)s %(type)s %(uuid)s"),
                     {'operation': row.operation,
                      'type': row.object_type,
                      'uuid': row.object_uuid})

            # Add code to sync this to ODL
            method, urlpath, to_send = self._json_data(row)

            try:
                self.client.sendjson(method, urlpath, to_send)
                db.update_db_row_state(session, row, odl_const.COMPLETED)
            except exceptions.ConnectionError:
                # Don't raise the retry count, just log an error
                LOG.error(_LE("Cannot connect to the Opendaylight "
                              "Controller"))
                # Set row back to pending
                db.update_db_row_state(session, row, odl_const.PENDING)
                # Break out of the loop and retry with the next
                # timer interval
                break
            except Exception as e:
                # str(e) rather than e.message: BaseException.message was
                # removed in Python 3 (PEP 352).
                LOG.error(_LE("Error syncing %(type)s %(operation)s,"
                              " id %(uuid)s Error: %(error)s"),
                          {'type': row.object_type,
                           'uuid': row.object_uuid,
                           'operation': row.operation,
                           'error': str(e)})
                db.update_pending_db_row_retry(session, row,
                                               self._row_retry_count)
        LOG.debug("Clearing sync thread event")
        if exit_after_run:
            # Permanently waiting thread model breaks unit tests
            # Adding this arg to exit here only for unit tests
            break
def delete_l2_gateway_connection_postcommit(self, context,
                                            l2_gateway_connection_id):
    """Delete an L2 gateway connection on ODL after the DB commit."""
    LOG.info(_LI("ODL: Delete L2Gateway connection %(l2gwconnid)s"),
             {'l2gwconnid': l2_gateway_connection_id})
    self.client.try_delete(
        L2GATEWAY_CONNECTIONS + '/' + l2_gateway_connection_id)
def __init__(self):
    """Register ODL options and start the journal thread for trunks."""
    cfg.CONF.register_opts(odl_conf.odl_opts, "ml2_odl")
    self.journal = journal.OpendaylightJournalThread()
    # Corrected the misspelled product name ("OpendayLight") in the
    # operator-facing log message.
    LOG.info(_LI('initialized trunk driver for OpenDaylight'))
def update_l2_gateway_postcommit(self, context, l2_gateway):
    """Update an L2 gateway on ODL after the DB commit."""
    LOG.info(_LI("ODL: Update L2Gateway %(l2gateway)s"),
             {'l2gateway': l2_gateway})
    url = L2GATEWAYS + '/' + l2_gateway['id']
    self.client.sendjson('put', url, {'l2_gateway': l2_gateway})
def delete_l2_gateway_postcommit(self, context, l2_gateway_id):
    """Delete an L2 gateway on ODL after the DB commit."""
    LOG.info(_LI("ODL: Delete L2Gateway %(l2gatewayid)s"),
             {'l2gatewayid': l2_gateway_id})
    self.client.try_delete(L2GATEWAYS + '/' + l2_gateway_id)
def create_l2_gateway_postcommit(self, context, l2_gateway):
    """Create an L2 gateway on ODL after the DB commit."""
    LOG.info(_LI("ODL: Create L2Gateway %(l2gateway)s"),
             {'l2gateway': l2_gateway})
    self.client.sendjson('post', L2GATEWAYS, {'l2_gateway': l2_gateway})
def __init__(self, service_plugin):
    """Initialize the v2 BGPVPN driver and its journal thread."""
    LOG.info(_LI("Initializing OpenDaylight BGPVPN v2 driver"))
    super(OpenDaylightBgpvpnDriver, self).__init__(service_plugin)
    # Journal thread asynchronously replays operations towards ODL.
    self.journal = journal.OpendaylightJournalThread()
def __init__(self, service_plugin, validator=None):
    """Initialize the v1 L2 gateway driver with a direct REST client."""
    super(OpenDaylightL2gwDriver, self).__init__(service_plugin, validator)
    self.service_plugin = service_plugin
    # v1 driver talks to ODL synchronously via a REST client.
    self.client = odl_client.OpenDaylightRestClient.create_client()
    LOG.info(_LI("ODL: Started OpenDaylight L2Gateway driver"))
def __init__(self):
    """Create the REST client used to push trunk operations to ODL."""
    self.client = odl_client.OpenDaylightRestClient.create_client()
    # Corrected the misspelled product name ("OpendayLight") in the
    # operator-facing log message.
    LOG.info(_LI('initialized trunk driver for OpenDaylight'))
def __init__(self, service_plugin, validator=None):
    """Initialize the v2 L2 gateway driver with a journal thread."""
    super(OpenDaylightL2gwDriver, self).__init__(service_plugin, validator)
    self.service_plugin = service_plugin
    # v2 driver defers ODL communication to the journal thread.
    self.journal = journal.OpendaylightJournalThread()
    LOG.info(_LI("ODL: Started OpenDaylight L2Gateway V2 driver"))
def cleanup_processing_rows(self, session):
    """Reset journal rows stuck in PROCESSING past the timeout to PENDING."""
    reset_count = db.reset_processing_rows(session,
                                           self._processing_timeout)
    if reset_count:
        LOG.info(_LI("Reset %(num)s orphaned rows back to pending"),
                 {"num": reset_count})
def _execute(self, *args):
    """Run ``self.COMMAND`` with *args* and return its stripped output."""
    command_line = (self.COMMAND,) + args
    LOG.info(_LI("SET-HOSTCONFIGS: Executing cmd: %s"),
             ' '.join(command_line))
    # List form (no shell) avoids shell-injection via the arguments.
    output = subprocess.check_output(command_line)
    return output.strip()
def create_flow_classifier_precommit(self, context):
    """No-op precommit hook for flow classifier creation."""
    LOG.info(_LI("Skipping precommit check."))