Example no. 1
0
    def sync_pending_row(self, exit_after_run=False):
        # Block until all pending rows are processed
        session = neutron_db_api.get_session()
        while not self.event.is_set():
            self.event.wait()
            # Clear the event and go back to waiting after
            # the sync block exits
            self.event.clear()
            while True:
                LOG.debug("Thread walking database")
                row = db.get_oldest_pending_db_row_with_lock(session)
                if not row:
                    LOG.debug("No rows to sync")
                    break

                # Validate the operation
                validate_func = db.VALIDATION_MAP[row.object_type]
                valid = validate_func(session, row.object_uuid, row.operation, row.data)
                if not valid:
                    LOG.info(
                        _LI("%(operation)s %(type)s %(uuid)s is not a " "valid operation yet, skipping for now"),
                        {"operation": row.operation, "type": row.object_type, "uuid": row.object_uuid},
                    )
                    continue

                LOG.info(
                    _LI("Syncing %(operation)s %(type)s %(uuid)s"),
                    {"operation": row.operation, "type": row.object_type, "uuid": row.object_uuid},
                )

                # Add code to sync this to ODL
                method, urlpath, to_send = self._json_data(row)

                try:
                    self.client.sendjson(method, urlpath, to_send)
                    db.update_processing_db_row_passed(session, row)
                except exceptions.ConnectionError as e:
                    # Don't raise the retry count, just log an error
                    LOG.error(_LE("Cannot connect to the Opendaylight " "Controller"))
                    # Set row back to pending
                    db.update_db_row_pending(session, row)
                    # Break out of the loop and retry with the next
                    # timer interval
                    break
                except Exception as e:
                    LOG.error(
                        _LE("Error syncing %(type)s %(operation)s," " id %(uuid)s Error: %(error)s"),
                        {
                            "type": row.object_type,
                            "uuid": row.object_uuid,
                            "operation": row.operation,
                            "error": e.message,
                        },
                    )
                    db.update_pending_db_row_retry(session, row, self._row_retry_count)
            LOG.debug("Clearing sync thread event")
            if exit_after_run:
                # Permanently waiting thread model breaks unit tests
                # Adding this arg to exit here only for unit tests
                break
Example no. 2
0
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = _ts()
                    self.f(*self.args, **self.kw)
                    end = _ts()
                    if not self._running:
                        break
                    delay = end - start - interval
                    if delay > 0:
                        LOG.warn(
                            _LW('task %(func_name)r run outlasted '
                                'interval by %(delay).2f sec'), {
                                    'func_name': self.f,
                                    'delay': delay
                                })
                    greenthread.sleep(-delay if delay < 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
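
Note: the _inner function above is the worker loop of oslo's FixedIntervalLoopingCall. As a usage illustration only, here is a minimal sketch assuming the oslo_service.loopingcall API; the callback name, counters, and intervals are invented for the example.

    from oslo_service import loopingcall

    calls = {'count': 0}

    def report_state():
        # Illustrative callback: stop the loop after three runs by raising
        # LoopingCallDone, whose retvalue is what wait() returns.
        calls['count'] += 1
        if calls['count'] >= 3:
            raise loopingcall.LoopingCallDone(retvalue=calls['count'])

    timer = loopingcall.FixedIntervalLoopingCall(report_state)
    # Run the callback every 2 seconds after a 1 second initial delay;
    # start() returns an event and wait() blocks until the loop finishes.
    result = timer.start(interval=2, initial_delay=1).wait()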
Example no. 3
0
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug(
                        'Dynamic looping call %(func_name)r sleeping '
                        'for %(idle).02f seconds', {
                            'func_name': self.f,
                            'idle': idle
                        })
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
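
The dynamic variant differs in that the wrapped function returns how long to idle before the next call. A minimal sketch under the same assumption (oslo_service.loopingcall API; the polling function is invented for the example):

    from oslo_service import loopingcall

    def poll_once():
        # Illustrative task: do some work, then tell the loop how many
        # seconds to sleep before calling us again.
        return 30

    timer = loopingcall.DynamicLoopingCall(poll_once)
    # periodic_interval_max caps whatever idle time the task requests,
    # matching the min(idle, periodic_interval_max) seen in _inner above.
    timer.start(initial_delay=None, periodic_interval_max=60)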
Example no. 4
0
    def sync_single_resource(self, operation, object_type, context):
        """Sync over a single resource from Neutron to OpenDaylight.

        Handle syncing a single operation over to OpenDaylight, and correctly
        filter out the attributes that are not required for the
        operation (create or update) being handled.
        """
        # Convert underscores to dashes in the URL for ODL
        object_type_url = object_type.replace('_', '-')
        try:
            obj_id = context.current['id']
            if operation == odl_const.ODL_DELETE:
                self.client.sendjson('delete', object_type_url + '/' + obj_id,
                                     None)
            else:
                if operation == odl_const.ODL_CREATE:
                    urlpath = object_type_url
                    method = 'post'
                    attr_filter = self.create_object_map[object_type]
                elif operation == odl_const.ODL_UPDATE:
                    urlpath = object_type_url + '/' + obj_id
                    method = 'put'
                    attr_filter = self.update_object_map[object_type]
                resource = context.current.copy()
                attr_filter(resource, context)
                self.client.sendjson(method, urlpath,
                                     {object_type_url[:-1]: resource})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Unable to perform %(operation)s on "
                              "%(object_type)s %(object_id)s"),
                          {'operation': operation,
                           'object_type': object_type,
                           'object_id': obj_id})
                self.out_of_sync = True
Example no. 5
0
    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # Check if due, if not skip
            idle_for = min(idle_for, spacing)
            if last_run is not None:
                delta = last_run + spacing - time.time()
                if delta > 0:
                    idle_for = min(idle_for, delta)
                    continue

            LOG.debug("Running periodic task %(full_task_name)s",
                      {"full_task_name": full_task_name})
            self._periodic_last_run[task_name] = _nearest_boundary(
                last_run, spacing)

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
                              {"full_task_name": full_task_name, "e": e})
            time.sleep(0)

        return idle_for
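
run_periodic_tasks is normally driven by a manager whose tasks are registered with the periodic_task decorator, with the caller sleeping for the returned idle_for between passes. A minimal sketch, assuming the oslo_service.periodic_task API (the manager class and task are invented for the example):

    from oslo_config import cfg
    from oslo_service import periodic_task

    class ExampleManager(periodic_task.PeriodicTasks):
        @periodic_task.periodic_task(spacing=60)
        def cleanup_stale_entries(self, context):
            # Illustrative task body; due at most once every 60 seconds.
            pass

    manager = ExampleManager(cfg.CONF)
    # Typical driver loop: run whatever is due, then sleep for the
    # suggested idle time before the next pass.
    #     idle_for = manager.run_periodic_tasks(context)
    #     time.sleep(idle_for)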
Example no. 6
0
 def sync_from_callback(self, operation, object_type, res_id,
                        resource_dict):
     try:
         if operation == odl_const.ODL_DELETE:
             self.out_of_sync |= not self.client.try_delete(object_type +
                                                            '/' + res_id)
         else:
             if operation == odl_const.ODL_CREATE:
                 urlpath = object_type
                 method = 'post'
             elif operation == odl_const.ODL_UPDATE:
                 urlpath = object_type + '/' + res_id
                 method = 'put'
             self.client.sendjson(method, urlpath, resource_dict)
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.error(
                 _LE("Unable to perform %(operation)s on "
                     "%(object_type)s %(res_id)s %(resource_dict)s"), {
                         'operation': operation,
                         'object_type': object_type,
                         'res_id': res_id,
                         'resource_dict': resource_dict
                     })
             self.out_of_sync = True
Example no. 7
0
    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # Check if due, if not skip
            idle_for = min(idle_for, spacing)
            if last_run is not None:
                delta = last_run + spacing - time.time()
                if delta > 0:
                    idle_for = min(idle_for, delta)
                    continue

            LOG.debug("Running periodic task %(full_task_name)s",
                      {"full_task_name": full_task_name})
            self._periodic_last_run[task_name] = _nearest_boundary(
                last_run, spacing)

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"), {
                    "full_task_name": full_task_name,
                    "e": e
                })
            time.sleep(0)

        return idle_for
Example no. 8
0
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = _ts()
                    self.f(*self.args, **self.kw)
                    end = _ts()
                    if not self._running:
                        break
                    delay = end - start - interval
                    if delay > 0:
                        LOG.warn(_LW('task %(func_name)r run outlasted '
                                     'interval by %(delay).2f sec'),
                                 {'func_name': self.f, 'delay': delay})
                    greenthread.sleep(-delay if delay < 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
Example no. 9
0
        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug('Dynamic looping call %(func_name)r sleeping '
                              'for %(idle).02f seconds',
                              {'func_name': self.f, 'idle': idle})
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)
Example no. 10
0
    def _create_parsers(self):
        for parser_name in self.network_topology_parsers:
            try:
                yield NetworkTopologyParser.create_parser(parser_name)

            except Exception:
                LOG.exception(
                    _LE('Error initializing topology parser: %(parser_name)r'),
                    {'parser_name': parser_name})
Example no. 11
0
    def _create_parsers(self):
        for parser_name in self.network_topology_parsers:
            try:
                yield NetworkTopologyParser.create_parser(parser_name)

            except Exception:
                LOG.exception(
                    _LE('Error initializing topology parser: %(parser_name)r'),
                    {'parser_name': parser_name})
Example no. 12
0
    def _fetch_elements_by_host(self, host_name, cache_timeout=60.0):
        '''Yields all network topology elements referring to given host name

        '''

        host_addresses = [host_name]
        try:
            # It uses both the compute host name and the known IP addresses
            # to recognize topology elements valid for the given compute host
            ip_addresses = utils.get_addresses_by_name(host_name)
        except Exception:
            ip_addresses = []
            LOG.exception(
                _LE('Unable to resolve IP addresses for host %(host_name)r'),
                {'host_name': host_name})
        else:
            host_addresses.extend(ip_addresses)

        yield_elements = set()
        try:
            for _, element in self._elements_by_ip.fetch_all(
                    host_addresses, cache_timeout):
                # yields every element only once
                if element not in yield_elements:
                    yield_elements.add(element)
                    yield element

        except cache.CacheFetchError as error:
            # This error is expected in most cases because typically not
            # all host_addresses map to a network topology element.
            if yield_elements:
                # As we need only one element for every host, we ignore the
                # case in which other host addresses didn't map to any element
                LOG.debug(
                    'Host addresses not found in networking topology: %s',
                    ', '.join(error.missing_keys))
            else:
                LOG.exception(
                    _LE('No such network topology elements for given host '
                        '%(host_name)r and given IPs: %(ip_addresses)s.'), {
                            'host_name': host_name,
                            'ip_addresses': ", ".join(ip_addresses)
                        })
                error.reraise_cause()
Example no. 13
0
    def _fetch_elements_by_host(self, host_name, cache_timeout=60.0):
        '''Yields all network topology elements referring to given host name

        '''

        host_addresses = [host_name]
        try:
            # It uses both the compute host name and the known IP addresses
            # to recognize topology elements valid for the given compute host
            ip_addresses = utils.get_addresses_by_name(host_name)
        except Exception:
            ip_addresses = []
            LOG.exception(
                _LE('Unable to resolve IP addresses for host %(host_name)r'),
                {'host_name': host_name})
        else:
            host_addresses.extend(ip_addresses)

        yield_elements = set()
        try:
            for _, element in self._elements_by_ip.fetch_all(
                    host_addresses, cache_timeout):
                # yields every element only once
                if element not in yield_elements:
                    yield_elements.add(element)
                    yield element

        except cache.CacheFetchError as error:
            # This error is expected in most cases because typically not
            # all host_addresses map to a network topology element.
            if yield_elements:
                # As we need only one element for every host, we ignore the
                # case in which other host addresses didn't map to any element
                LOG.debug(
                    'Host addresses not found in networking topology: %s',
                    ', '.join(error.missing_keys))
            else:
                LOG.exception(
                    _LE('No such network topology elements for given host '
                        '%(host_name)r and given IPs: %(ip_addresses)s.'),
                    {'host_name': host_name,
                     'ip_addresses': ", ".join(ip_addresses)})
                error.reraise_cause()
Example no. 14
0
def _parse_check(rule):
    """Parse a single base check rule into an appropriate Check object."""

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_LE("Failed to understand rule %s") % rule)
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        return _checks[None](kind, match)
    else:
        LOG.error(_LE("No handler for matches of kind %s") % kind)
        return FalseCheck()
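
For illustration, given the logic above these inputs would map to checks as follows (the 'role' kind is assumed to be registered in _checks, as it is in oslo.policy, where unregistered kinds fall back to the None handler):

    _parse_check('!')           # special rule -> FalseCheck()
    _parse_check('@')           # special rule -> TrueCheck()
    _parse_check('role:admin')  # 'role' in _checks -> _checks['role']('role', 'admin')
    _parse_check('no-colon')    # split(':') fails -> logged, fails closed with FalseCheck()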
Example no. 15
0
def _parse_check(rule):
    """Parse a single base check rule into an appropriate Check object."""

    # Handle the special checks
    if rule == '!':
        return FalseCheck()
    elif rule == '@':
        return TrueCheck()

    try:
        kind, match = rule.split(':', 1)
    except Exception:
        LOG.exception(_LE("Failed to understand rule %s") % rule)
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        return _checks[None](kind, match)
    else:
        LOG.error(_LE("No handler for matches of kind %s") % kind)
        return FalseCheck()
Example no. 16
0
 def update_network_segments(self, new_obj):
     obj_id = new_obj.get('id')
     LOG.debug("update network segments for %(obj_id)s",
               {'obj_id': obj_id})
     try:
         new_segments = new_obj.get('segments')
         if new_segments is None:
             return
         LOG.debug("new_segments %(new_segments)s",
                   {'new_segments': new_segments})
         session = db_api.get_session()
         with session.begin(subtransactions=True):
             segments = db.get_network_segments(session, obj_id)
             LOG.debug("update network segments when old segments "
                       "%(segments)s", {'segments': segments})
             phy_network = self.physical_network
             for segment in segments:
                 db.delete_network_segment(session, segment.get(api.ID))
             for new_segment in new_segments:
                 network_type = new_segment.get(provider.NETWORK_TYPE)
                 model = None
                 filters = {}
                 segmentation_id = new_segment.get(provider.SEGMENTATION_ID)
                 new_segment_db = {
                     api.NETWORK_TYPE: network_type,
                     api.PHYSICAL_NETWORK: new_segment.get(
                         provider.PHYSICAL_NETWORK),
                     api.SEGMENTATION_ID: segmentation_id}
                 if new_segment.get(provider.NETWORK_TYPE) == 'vlan':
                     segment_index = 0
                     new_segment_db = {
                         api.NETWORK_TYPE: network_type,
                         api.PHYSICAL_NETWORK: phy_network,
                         api.SEGMENTATION_ID: segmentation_id}
                     model = type_vlan.VlanAllocation
                     filters['physical_network'] = phy_network
                     filters['vlan_id'] = segmentation_id
                 else:
                     segment_index = 1
                     model = type_vxlan.VxlanAllocation
                     filters['vxlan_vni'] = segmentation_id
                 LOG.debug("update network segments when new segment "
                           "%(new_segment)s",
                           {'new_segment': new_segment_db})
                 self.allocate_fully_specified_segment(
                     session, network_type, model, **filters)
                 db.add_network_segment(session, obj_id, new_segment_db,
                                        segment_index)
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.error(_LE("update network segments error on "
                           "%(object_id)s"),
                       {'object_id': obj_id})
Example no. 17
0
    def sync_single_resource(self, operation, object_type, context):
        """Sync over a single resource from Neutron to OpenDaylight.

        Handle syncing a single operation over to OpenDaylight, and correctly
        filter out the attributes that are not required for the
        operation (create or update) being handled.
        """
        # Convert underscores to dashes in the URL for ODL
        object_type_url = object_type.replace('_', '-')
        try:
            obj_id = context.current['id']
            if operation == odl_const.ODL_DELETE:
                self.out_of_sync |= not self.client.try_delete(
                    object_type_url + '/' + obj_id)
                LOG.debug("sync_single_resource 1 out_of_sync is %(out_of_sync)s",
                   {'out_of_sync': self.out_of_sync})
            else:
                filter_cls = self.FILTER_MAP[object_type]
                if operation == odl_const.ODL_CREATE:
                    urlpath = object_type_url
                    method = 'post'
                    attr_filter = filter_cls.filter_create_attributes
                elif operation == odl_const.ODL_UPDATE:
                    urlpath = object_type_url + '/' + obj_id
                    method = 'put'
                    attr_filter = filter_cls.filter_update_attributes
                resource = context.current.copy()
                attr_filter(resource, context)
                new_obj_obj = self.client.sendjson(
                    method, urlpath, {object_type_url[:-1]: resource})

                # byhqf
                if operation == odl_const.ODL_CREATE:
                    if object_type == odl_const.ODL_NETWORKS:
                        self.update_network_segments(new_obj_obj.get('network'))
                # byhqf end
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Unable to perform %(operation)s on "
                              "%(object_type)s %(object_id)s"),
                          {'operation': operation,
                           'object_type': object_type,
                           'object_id': obj_id})
                self.out_of_sync = True
                LOG.debug("sync_single_resource 2 out_of_sync is %(out_of_sync)s",
                   {'out_of_sync': self.out_of_sync})
Example no. 18
0
    def _fetch_and_parse_network_topology(self, addresses):
        # The cache calls this method to fetch new elements when at least one
        # of the addresses is not in the cache or it has expired.

        # pylint: disable=unused-argument
        LOG.info(_LI('Fetch network topology from ODL.'))
        response = self._client.get()
        response.raise_for_status()

        network_topology = response.json()
        if LOG.isEnabledFor(logging.DEBUG):
            topology_str = jsonutils.dumps(network_topology,
                                           sort_keys=True,
                                           indent=4,
                                           separators=(',', ': '))
            LOG.debug("Got network topology:\n%s", topology_str)

        at_least_one_element_for_asked_addresses = False
        for parser in self._parsers:
            try:
                for element in parser.parse_network_topology(network_topology):
                    if not isinstance(element, NetworkTopologyElement):
                        raise TypeError(
                            "Yield element doesn't implement interface "
                            "'NetworkTopologyElement': {!r}".format(element))
                    # the same element can be known by more than one host address
                    for host_address in element.host_addresses:
                        if host_address in addresses:
                            at_least_one_element_for_asked_addresses = True
                        yield host_address, element
            except Exception:
                LOG.exception(
                    _LE("Parser %(parser)r failed to parse network topology."),
                    {'parser': parser})

        if not at_least_one_element_for_asked_addresses:
            # this will mark entries for the given addresses as failed, to
            # allow calling this method again as soon as it is requested and
            # to avoid waiting for cache expiration
            raise ValueError(
                'No such topology element for given host addresses: {}'.format(
                    ', '.join(addresses)))
Example no. 19
0
    def _fetch_and_parse_network_topology(self, addresses):
        # The cache calls this method to fetch new elements when at least one
        # of the addresses is not in the cache or it has expired.

        # pylint: disable=unused-argument
        LOG.info(_LI('Fetch network topology from ODL.'))
        response = self._client.get()
        response.raise_for_status()

        network_topology = response.json()
        if LOG.isEnabledFor(logging.DEBUG):
            topology_str = jsonutils.dumps(
                network_topology, sort_keys=True, indent=4,
                separators=(',', ': '))
            LOG.debug("Got network topology:\n%s", topology_str)

        at_least_one_element_for_asked_addresses = False
        for parser in self._parsers:
            try:
                for element in parser.parse_network_topology(network_topology):
                    if not isinstance(element, NetworkTopologyElement):
                        raise TypeError(
                            "Yield element doesn't implement interface "
                            "'NetworkTopologyElement': {!r}".format(element))
                    # the same element can be known by more than one host address
                    for host_address in element.host_addresses:
                        if host_address in addresses:
                            at_least_one_element_for_asked_addresses = True
                        yield host_address, element
            except Exception:
                LOG.exception(
                    _LE("Parser %(parser)r failed to parse network topology."),
                    {'parser': parser})

        if not at_least_one_element_for_asked_addresses:
            # this will mark entries for the given addresses as failed, to
            # allow calling this method again as soon as it is requested and
            # to avoid waiting for cache expiration
            raise ValueError(
                'No such topology element for given host addresses: {}'.format(
                    ', '.join(addresses)))
Example no. 20
0
    def _child_wait_for_exit_or_signal(self, launcher):
        status = 0
        signo = 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fall back into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Child caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo
Example no. 21
0
 def sync_from_callback(self, operation, object_type, res_id,
                        resource_dict):
     try:
         if operation == odl_const.ODL_DELETE:
             self.out_of_sync |= not self.client.try_delete(
                 object_type + '/' + res_id)
         else:
             if operation == odl_const.ODL_CREATE:
                 urlpath = object_type
                 method = 'post'
             elif operation == odl_const.ODL_UPDATE:
                 urlpath = object_type + '/' + res_id
                 method = 'put'
             self.client.sendjson(method, urlpath, resource_dict)
     except Exception:
         with excutils.save_and_reraise_exception():
             LOG.error(_LE("Unable to perform %(operation)s on "
                           "%(object_type)s %(res_id)s %(resource_dict)s"),
                       {'operation': operation,
                        'object_type': object_type,
                        'res_id': res_id,
                        'resource_dict': resource_dict})
             self.out_of_sync = True
Example no. 22
0
    def _child_wait_for_exit_or_signal(self, launcher):
        status = 0
        signo = 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fall back into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Child caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()

        return status, signo
Example no. 23
0
    def sync_single_resource(self, operation, object_type, context):
        """Sync over a single resource from Neutron to OpenDaylight.

        Handle syncing a single operation over to OpenDaylight, and correctly
        filter out the attributes that are not required for the
        operation (create or update) being handled.
        """
        # Convert underscores to dashes in the URL for ODL
        object_type_url = object_type.replace('_', '-')
        try:
            obj_id = context.current['id']
            if operation == odl_const.ODL_DELETE:
                self.client.sendjson('delete', object_type_url + '/' + obj_id,
                                     None)
            else:
                if operation == odl_const.ODL_CREATE:
                    urlpath = object_type_url
                    method = 'post'
                    attr_filter = self.create_object_map[object_type]
                elif operation == odl_const.ODL_UPDATE:
                    urlpath = object_type_url + '/' + obj_id
                    method = 'put'
                    attr_filter = self.update_object_map[object_type]
                resource = context.current.copy()
                attr_filter(resource, context)
                self.client.sendjson(method, urlpath,
                                     {object_type_url[:-1]: resource})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Unable to perform %(operation)s on "
                        "%(object_type)s %(object_id)s"), {
                            'operation': operation,
                            'object_type': object_type,
                            'object_id': obj_id
                        })
                self.out_of_sync = True
Example no. 24
0
def _parse_text_rule(rule):
    """Parses policy to the tree.

    Translates a policy written in the policy language into a tree of
    Check objects.
    """

    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_LE("Failed to understand rule %s") % rule)

        # Fail closed
        return FalseCheck()
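
For illustration, and assuming the standard oslo.policy check classes (TrueCheck, RoleCheck, GenericCheck, OrCheck) are registered as usual:

    _parse_text_rule('')          # empty rule -> TrueCheck(), i.e. always accept
    _parse_text_rule('role:admin or is_admin:True')
    # -> OrCheck wrapping RoleCheck('role', 'admin') and
    #    GenericCheck('is_admin', 'True')
    _parse_text_rule('(role:admin')
    # unbalanced parenthesis -> state.result raises ValueError,
    # the failure is logged and the rule fails closed as FalseCheck()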
Example no. 25
0
def _parse_text_rule(rule):
    """Parses policy to the tree.

    Translates a policy written in the policy language into a tree of
    Check objects.
    """

    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_LE("Failed to understand rule %s") % rule)

        # Fail closed
        return FalseCheck()
Example no. 26
0
    def sync_pending_row(self, exit_after_run=False):
        # Block until all pending rows are processed
        session = neutron_db_api.get_session()
        while not self.event.is_set():
            self.event.wait()
            # Clear the event and go back to waiting after
            # the sync block exits
            self.event.clear()
            while True:
                LOG.debug("Thread walking database")
                row = db.get_oldest_pending_db_row_with_lock(session)
                if not row:
                    LOG.debug("No rows to sync")
                    break

                # Validate the operation
                validate_func = db.VALIDATION_MAP[row.object_type]
                valid = validate_func(session, row.object_uuid, row.operation,
                                      row.data)
                if not valid:
                    LOG.info(
                        _LI("%(operation)s %(type)s %(uuid)s is not a "
                            "valid operation yet, skipping for now"), {
                                'operation': row.operation,
                                'type': row.object_type,
                                'uuid': row.object_uuid
                            })
                    continue

                LOG.info(
                    _LI("Syncing %(operation)s %(type)s %(uuid)s"), {
                        'operation': row.operation,
                        'type': row.object_type,
                        'uuid': row.object_uuid
                    })

                # Add code to sync this to ODL
                method, urlpath, to_send = self._json_data(row)

                try:
                    self.client.sendjson(method, urlpath, to_send)
                    db.update_processing_db_row_passed(session, row)
                except exceptions.ConnectionError as e:
                    # Don't raise the retry count, just log an error
                    LOG.error(
                        _LE("Cannot connect to the Opendaylight "
                            "Controller"))
                    # Set row back to pending
                    db.update_db_row_pending(session, row)
                    # Break out of the loop and retry with the next
                    # timer interval
                    break
                except Exception as e:
                    LOG.error(
                        _LE("Error syncing %(type)s %(operation)s,"
                            " id %(uuid)s Error: %(error)s"), {
                                'type': row.object_type,
                                'uuid': row.object_uuid,
                                'operation': row.operation,
                                'error': e.message
                            })
                    db.update_pending_db_row_retry(session, row,
                                                   self._row_retry_count)
            LOG.debug("Clearing sync thread event")
            if exit_after_run:
                # Permanently waiting thread model breaks unit tests
                # Adding this arg to exit here only for unit tests
                break
Example no. 27
0
    def bind_port(self, port_context):
        """Set binding for a valid segment

        """
        host_name = port_context.host
        elements = list()
        try:
            # Append to an empty list to collect as many elements as possible
            # in case it raises an exception
            elements.extend(self._fetch_elements_by_host(host_name))
        except Exception:
            LOG.exception(
                _LE('Error fetching elements for host %(host_name)r.'),
                {'host_name': host_name},
                exc_info=1)

        if not elements:
            # If it wasn't able to find any network topology element for the
            # given host, then it uses the legacy OVS one, keeping the old
            # behaviour
            LOG.warning(
                _LW('Using legacy OVS network topology element for port '
                    'binding for host: %(host_name)r.'),
                {'host_name': host_name})

            # Imported here to avoid cyclic module dependencies
            from networking_odl.ml2 import ovsdb_topology
            elements = [ovsdb_topology.OvsdbNetworkTopologyElement()]

        # TODO(Federico Ressi): when there is more than one candidate virtual
        # switch instance for the same host, one of them has to be chosen for
        # binding the port. As there isn't any known way to perform this
        # selection, it selects a VIF type that is valid for all switches
        # that have been found. This has to be improved.
        for vif_type in self.valid_vif_types:
            vif_type_is_valid_for_all = True
            for element in elements:
                if vif_type not in element.valid_vif_types:
                    # it is invalid for at least one element: discard it
                    vif_type_is_valid_for_all = False
                    break

            if vif_type_is_valid_for_all:
                # This is the best VIF type valid for all elements
                LOG.debug(
                    "Found VIF type %(vif_type)r valid for all network "
                    "topology elements for host %(host_name)r.", {
                        'vif_type': vif_type,
                        'host_name': host_name
                    })

                for element in elements:
                    # It assumes that any element could be good for the given
                    # host. In most cases I expect exactly one element for
                    # every compute host.
                    try:
                        return element.bind_port(port_context, vif_type,
                                                 self._vif_details)

                    except Exception:
                        LOG.exception(
                            _LE('Network topology element has failed binding '
                                'port:\n%(element)s'),
                            {'element': element.to_json()})

        LOG.error(
            _LE('Unable to bind port element for given host and valid VIF '
                'types:\n'
                '\thostname: %(host_name)s\n'
                '\tvalid VIF types: %(valid_vif_types)s'), {
                    'host_name': host_name,
                    'valid_vif_types': ', '.join(self.valid_vif_types)
                })
Example no. 28
0
    def bind_port(self, port_context):
        """Set binding for a valid segment

        """
        host_name = port_context.host
        elements = list()
        try:
            # Append to an empty list to collect as many elements as possible
            # in case it raises an exception
            elements.extend(self._fetch_elements_by_host(host_name))
        except Exception:
            LOG.exception(
                _LE('Error fetching elements for host %(host_name)r.'),
                {'host_name': host_name}, exc_info=1)

        if not elements:
            # If it wasn't able to find any network topology element for the
            # given host, then it uses the legacy OVS one, keeping the old
            # behaviour
            LOG.warning(
                _LW('Using legacy OVS network topology element for port '
                    'binding for host: %(host_name)r.'),
                {'host_name': host_name})

            # Imported here to avoid cyclic module dependencies
            from networking_odl.ml2 import ovsdb_topology
            elements = [ovsdb_topology.OvsdbNetworkTopologyElement()]

        # TODO(Federico Ressi): when there is more than one candidate virtual
        # switch instance for the same host, one of them has to be chosen for
        # binding the port. As there isn't any known way to perform this
        # selection, it selects a VIF type that is valid for all switches
        # that have been found. This has to be improved.
        for vif_type in self.valid_vif_types:
            vif_type_is_valid_for_all = True
            for element in elements:
                if vif_type not in element.valid_vif_types:
                    # it is invalid for at least one element: discard it
                    vif_type_is_valid_for_all = False
                    break

            if vif_type_is_valid_for_all:
                # This is the best VIF type valid for all elements
                LOG.debug(
                    "Found VIF type %(vif_type)r valid for all network "
                    "topology elements for host %(host_name)r.",
                    {'vif_type': vif_type, 'host_name': host_name})

                for element in elements:
                    # It assumes that any element could be good for the given
                    # host. In most cases I expect exactly one element for
                    # every compute host.
                    try:
                        return element.bind_port(
                            port_context, vif_type, self._vif_details)

                    except Exception:
                        LOG.exception(
                            _LE('Network topology element has failed binding '
                                'port:\n%(element)s'),
                            {'element': element.to_json()})

        LOG.error(
            _LE('Unable to bind port element for given host and valid VIF '
                'types:\n'
                '\thostname: %(host_name)s\n'
                '\tvalid VIF types: %(valid_vif_types)s'),
            {'host_name': host_name,
             'valid_vif_types': ', '.join(self.valid_vif_types)})