    def _validate_and_cleanse_policy(self, context, policy_data):
        """Validate every field of the policy and raise exceptions if any.

        This is a simple syntax validation. Actual funtional validation is
        performed at the backend by BCF and error message returned is bubbled
        up to the Horizon GUI.

        :param context: context of the transaction
        :param policy_data: the policy resource to be validated
        """
        policy_data['tenant_id'] = Util.get_tenant_id_for_create(
            context, policy_data)
        V4ANY = '0.0.0.0/0'
        CIDRALL = ['any', 'external']
        source = (V4ANY if policy_data['source'] in CIDRALL else
                  policy_data['source'])
        destination = (V4ANY if policy_data['destination'] in CIDRALL else
                       policy_data['destination'])
        errors = [
            validators.validate_subnet(source),
            validators.validate_subnet(destination),
            self._validate_nexthops(policy_data['nexthops']),
            self._validate_action(policy_data['action']),
            self._validate_priority(policy_data['priority']),
            self._validate_port_number(policy_data['source_port']),
            self._validate_port_number(policy_data['destination_port']),
            self._validate_port_protocol(policy_data)
        ]
        errors = [m for m in errors if m]
        if errors:
            LOG.debug(errors)
            raise n_exc.InvalidInput(error_message=errors)

        return self._cleanse_policy(policy_data)
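The method runs every validator, keeps only the non-empty error messages, and raises once with the combined list. Below is a minimal, self-contained sketch of that collect-then-raise pattern; the validator and ValueError are illustrative stand-ins, not the plugin's own validators or n_exc.InvalidInput.

# Illustrative collect-then-raise validation, mirroring the method above.
# validate_port and ValueError stand in for the plugin's validators and
# n_exc.InvalidInput.
def validate_port(port):
    if not 0 <= int(port) <= 65535:
        return "port %s out of range" % port
    return None

def validate_rule(rule):
    errors = [
        validate_port(rule.get('source_port', 0)),
        validate_port(rule.get('destination_port', 0)),
    ]
    errors = [msg for msg in errors if msg]  # drop the None results
    if errors:
        raise ValueError('; '.join(errors))
    return rule

validate_rule({'source_port': 80, 'destination_port': 443})      # passes
# validate_rule({'source_port': 80, 'destination_port': 70000})  # raises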
Example #2
 def create_router(self, context, router):
     self._warn_on_state_status(router['router'])
     # this also validates if the current tenant can create this router
     tenant_id = Util.get_tenant_id_for_create(context, router['router'])
     # cache the transaction_id
     bsn_transaction_id = uuidutils.generate_uuid()
     # add this unique identifier to the router object upstream, so that it
     # reaches the pre-commit callback
     router['router'][BSN_TRANSACTION_ID] = bsn_transaction_id
     try:
         new_router = super(L3RestProxy,
                            self).create_router(context, router)
         return new_router
     except Exception:
         with excutils.save_and_reraise_exception():
             try:
                 router_id = self.txn_cache.remove_transaction(
                     bsn_transaction_id)
                 self.servers.rest_delete_router(tenant_id, router_id)
             except Exception as e:
                 LOG.error(
                     _LE("Cannot clean up the router object created "
                         "on BCF. Exception: %(exc)s"), {'exc': e})
     finally:
         self.txn_cache.remove_transaction(bsn_transaction_id)
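The try/except/finally above is a compensating-cleanup pattern: if the upstream create fails after the transaction id was cached, the router already created on BCF is deleted and the original exception is re-raised. A rough sketch of that shape, assuming oslo_utils is available; `backend` and `txn_cache` are hypothetical stand-ins for the BCF server proxy and transaction cache.

# Sketch of create-then-roll-back-on-failure, as in create_router above.
# `backend` and `txn_cache` are hypothetical; oslo_utils is a real library.
from oslo_utils import excutils, uuidutils

def create_with_rollback(backend, txn_cache, context, router):
    txn_id = uuidutils.generate_uuid()
    router['router']['bsn_transaction_id'] = txn_id
    try:
        return backend.create_router(context, router)
    except Exception:
        # Re-raise the original error after a best-effort cleanup.
        with excutils.save_and_reraise_exception():
            try:
                router_id = txn_cache.remove_transaction(txn_id)
                backend.rest_delete_router(router_id)
            except Exception:
                pass  # never mask the original failure
    finally:
        txn_cache.remove_transaction(txn_id)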
Example #3
 def get_connection_source(self, unicode_mode=False):
     source = {}
     if unicode_mode:
         if not self.src_tenant_id:
             raise ReachabilityTestUnicodeTenantIdMissing(
                 test_name=self.name)
         source['tenant'] = self.src_tenant_id
         if not self.src_segment_id:
             raise ReachabilityTestUnicodeSegmentIdMissing(
                 test_name=self.name)
         source['segment'] = self.src_segment_id
     else:
         source['tenant'] = Util.format_resource_name(self.src_tenant_name)
         if not self.src_segment_name:
             raise ReachabilityTestSegmentNameMissing(test_name=self.name)
         source['segment'] = Util.format_resource_name(
             self.src_segment_name)
     source['ip'] = self.src_ip
     return source
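Both branches return the same three-key dict; only the lookup differs, raw IDs in unicode mode versus formatted names otherwise. A self-contained sketch of that branching; format_name is an illustrative stand-in for Util.format_resource_name and the real formatting rules may differ.

# Standalone sketch of the unicode_mode branch above. format_name is an
# illustrative stand-in for Util.format_resource_name.
def format_name(name):
    return name.strip().replace(' ', '-').lower()

def connection_source(src, unicode_mode=False):
    if unicode_mode:
        return {'tenant': src['tenant_id'],
                'segment': src['segment_id'],
                'ip': src['ip']}
    return {'tenant': format_name(src['tenant_name']),
            'segment': format_name(src['segment_name']),
            'ip': src['ip']}

print(connection_source({'tenant_name': 'Admin Project',
                         'segment_name': 'Web Segment',
                         'ip': '10.0.0.5'}))
# {'tenant': 'admin-project', 'segment': 'web-segment', 'ip': '10.0.0.5'}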
Example #4
    def _update_tenant_cache(self, reconcile=True, ratelimit=False):
        if ratelimit is True and self._last_keystone_sync_time is not None:
            if time.time() - self._last_keystone_sync_time <= \
                    KEYSTONE_SYNC_RATE_LIMIT:
                return

        try:
            auth = v3.Password(auth_url=self.auth_url,
                               username=self.auth_user,
                               password=self.auth_password,
                               project_name=self.auth_tenant,
                               user_domain_name=self.user_domain_name,
                               project_domain_name=self.project_domain_name)
            sess = session.Session(auth=auth)
            keystone_client = ksclient.Client(session=sess)
            tenants = keystone_client.projects.list()
            new_cached_tenants = {
                tn.id: Util.format_resource_name(tn.name)
                for tn in tenants
            }
            # Add SERVICE_TENANT to handle hidden network for VRRP
            new_cached_tenants[SERVICE_TENANT] = SERVICE_TENANT

            LOG.debug("New TENANTS: %s \nPrevious Tenants %s",
                      new_cached_tenants, self.keystone_tenants)
            diff = DictDiffer(new_cached_tenants, self.keystone_tenants)
            self.keystone_tenants = new_cached_tenants
            if reconcile:
                for tenant_id in diff.added():
                    LOG.debug("TENANT create: id %s name %s", tenant_id,
                              self.keystone_tenants[tenant_id])
                    self._rest_create_tenant(tenant_id)
                for tenant_id in diff.removed():
                    LOG.debug("TENANT delete: id %s", tenant_id)
                    self.rest_delete_tenant(tenant_id)
                if diff.changed():
                    hash_handler = cdb.HashHandler()
                    res = hash_handler._get_current_record()
                    if res:
                        lock_owner = hash_handler._get_lock_owner(res.hash)
                        if lock_owner and cdb.DBLOCK_PREFIX_TOPO in lock_owner:
                            # topology sync is still going on
                            return True
                    LOG.debug("TENANT changed: force topo sync")
                    hash_handler.put_hash('initial:hash,code')
            return True
        except Exception:
            LOG.exception("Encountered an error syncing with keystone.")
            return False
        finally:
            self._last_keystone_sync_time = time.time()
Example #5
    def _update_tenant_cache(self, reconcile=True):
        try:
            auth = v3.Password(auth_url=self.auth_url,
                               username=self.auth_user,
                               password=self.auth_password,
                               project_name=self.auth_tenant,
                               user_domain_name=self.user_domain_name,
                               project_domain_name=self.project_domain_name)
            sess = session.Session(auth=auth)
            keystone_client = ksclient.Client(session=sess)
            tenants = keystone_client.projects.list()
            new_cached_tenants = {tn.id: Util.format_resource_name(tn.name)
                                  for tn in tenants}
            # Add SERVICE_TENANT to handle hidden network for VRRP
            new_cached_tenants[SERVICE_TENANT] = SERVICE_TENANT

            LOG.debug("New TENANTS: %s \nPrevious Tenants %s",
                      new_cached_tenants, self.keystone_tenants)
            diff = DictDiffer(new_cached_tenants, self.keystone_tenants)
            self.keystone_tenants = new_cached_tenants
            if reconcile:
                for tenant_id in diff.added():
                    LOG.debug("TENANT create: id %s name %s",
                              tenant_id, self.keystone_tenants[tenant_id])
                    self._rest_create_tenant(tenant_id)
                for tenant_id in diff.removed():
                    LOG.debug("TENANT delete: id %s", tenant_id)
                    self.rest_delete_tenant(tenant_id)
                if diff.changed():
                    hash_handler = cdb.HashHandler()
                    res = hash_handler._get_current_record()
                    if res:
                        lock_owner = hash_handler._get_lock_owner(res.hash)
                        if lock_owner and cdb.DBLOCK_PREFIX_TOPO in lock_owner:
                            # topology sync is still going on
                            return True
                    LOG.debug("TENANT changed: force topo sync")
                    hash_handler.put_hash('initial:hash,code')
            return True
        except Exception:
            LOG.exception("Encountered an error syncing with keystone.")
            return False
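All of the _update_tenant_cache variants compare the fresh tenant map against the cached one with DictDiffer and then act on its added(), removed() and changed() keys. A minimal stand-in with the same interface (a sketch, not the plugin's implementation):

# Minimal stand-in for the DictDiffer used above: compares a new dict
# against an old one and reports key-level differences.
class DictDiffer(object):
    def __init__(self, new, old):
        self.new, self.old = new, old
        self.new_keys, self.old_keys = set(new), set(old)

    def added(self):
        return self.new_keys - self.old_keys

    def removed(self):
        return self.old_keys - self.new_keys

    def changed(self):
        # keys present in both dicts but mapped to different values
        return {k for k in self.new_keys & self.old_keys
                if self.new[k] != self.old[k]}

diff = DictDiffer({'t1': 'a', 't2': 'b'}, {'t2': 'x', 't3': 'c'})
print(diff.added(), diff.removed(), diff.changed())
# {'t1'} {'t3'} {'t2'}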
Example #6
    def _update_tenant_cache(self, reconcile=True, ratelimit=False):
        if ratelimit is True and self._last_keystone_sync_time is not None:
            if time.time() - self._last_keystone_sync_time <= \
                    KEYSTONE_SYNC_RATE_LIMIT:
                return

        try:
            auth = v3.Password(auth_url=self.auth_url,
                               username=self.auth_user,
                               password=self.auth_password,
                               project_name=self.auth_tenant,
                               user_domain_name=self.user_domain_name,
                               project_domain_name=self.project_domain_name)
            sess = session.Session(auth=auth)
            keystone_client = ksclient.Client(session=sess)
            tenants = keystone_client.projects.list()
            new_cached_tenants = {tn.id: Util.format_resource_name(tn.name)
                                  for tn in tenants}
            # Add SERVICE_TENANT to handle hidden network for VRRP
            new_cached_tenants[SERVICE_TENANT] = SERVICE_TENANT

            LOG.debug("New TENANTS: %s \nPrevious Tenants %s",
                      new_cached_tenants, self.keystone_tenants)
            diff = DictDiffer(new_cached_tenants, self.keystone_tenants)
            self.keystone_tenants = new_cached_tenants
            if reconcile:
                for tenant_id in diff.added():
                    LOG.debug("TENANT create: id %s name %s",
                              tenant_id, self.keystone_tenants[tenant_id])
                    self._rest_create_tenant(tenant_id)
                for tenant_id in diff.removed():
                    LOG.debug("TENANT delete: id %s", tenant_id)
                    self.rest_delete_tenant(tenant_id)
                if diff.changed():
                    LOG.debug("Tenant cache outdated. Forcing topo_sync.")
                    self.force_topo_sync(check_ts=False)
            return True
        except Exception:
            LOG.exception("Encountered an error syncing with keystone.")
            return False
        finally:
            self._last_keystone_sync_time = time.time()
Example #7
    def rest_call(self, action, resource, data, headers, ignore_codes,
                  timeout=False):
        context = self.get_context_ref()
        if context:
            # include the requesting context information if available
            cdict = context.to_dict()
            # remove the auth token so it's not present in debug logs on the
            # backend controller
            cdict.pop('auth_token', None)
            if ('tenant_name' in cdict and cdict['tenant_name']):
                cdict['tenant_name'] = Util.format_resource_name(
                    cdict['tenant_name'])
            headers[REQ_CONTEXT_HEADER] = jsonutils.dumps(cdict)
        hash_handler = cdb.HashHandler()
        good_first = sorted(self.servers, key=lambda x: x.failed)
        first_response = None
        for active_server in good_first:
            LOG.debug("ServerProxy: %(action)s to servers: "
                      "%(server)r, %(resource)s",
                      {'action': action,
                       'server': (active_server.server,
                                  active_server.port),
                       'resource': resource})
            for x in range(HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT + 1):
                ret = active_server.rest_call(action, resource, data, headers,
                                              timeout,
                                              reconnect=self.always_reconnect,
                                              hash_handler=hash_handler)
                if ret[0] != httplib.SERVICE_UNAVAILABLE:
                    break
                time.sleep(HTTP_SERVICE_UNAVAILABLE_RETRY_INTERVAL)

            # If inconsistent, do a full synchronization
            if ret[0] == httplib.CONFLICT and hash_handler.is_db_lock_owner():
                if not self.get_topo_function:
                    raise cfg.Error(_('Server requires synchronization, '
                                      'but no topology function was defined.'))

                LOG.info("ServerProxy: HashConflict detected with request "
                         "%(action)s %(resource)s Starting Topology sync",
                         {'action': action, 'resource': resource})
                topo_hh = self.dblock_mark_toposync_started(hash_handler)
                try:
                    data = self.get_topo_function(
                        **self.get_topo_function_args)
                    if data:
                        ret_ts = active_server.rest_call('POST', TOPOLOGY_PATH,
                                                         data, timeout=None,
                                                         hash_handler=topo_hh)
                        if self.server_failure(ret_ts, ignore_codes):
                            LOG.error("ServerProxy: Topology sync failed")
                            raise RemoteRestError(reason=ret_ts[2],
                                                  status=ret_ts[0])
                finally:
                    LOG.info("ServerProxy: Topology sync completed")
                    if data is None:
                        return None
            elif ret[0] == httplib.CONFLICT and \
                    not hash_handler.is_db_lock_owner():
                # DB lock ownership lost, allow current owner to detect hash
                # conflict and perform needed TopoSync
                LOG.warning("HashConflict detected but thread is no longer"
                            " DB lock owner. Skipping TopoSync call")

            # Store the first response as the error to be bubbled up to the
            # user since it was a good server. Subsequent servers will most
            # likely be cluster slaves and won't have a useful error for the
            # user (e.g. 302 redirect to master)
            if not first_response:
                first_response = ret
            if not self.server_failure(ret, ignore_codes):
                active_server.failed = False
                LOG.debug("ServerProxy: %(action)s succeed for servers: "
                          "%(server)r Response: %(response)s",
                          {'action': action,
                           'server': (active_server.server,
                                      active_server.port),
                           'response': ret[3]})
                return ret
            else:
                LOG.warning('ServerProxy: %(action)s failure for servers:'
                            '%(server)r Response: %(response)s',
                            {'action': action,
                             'server': (active_server.server,
                                        active_server.port),
                             'response': ret[3]})
                LOG.warning("ServerProxy: Error details: "
                            "status=%(status)d, reason=%(reason)r, "
                            "ret=%(ret)s, data=%(data)r",
                            {'status': ret[0], 'reason': ret[1],
                             'ret': ret[2], 'data': ret[3]})
                active_server.failed = True

        # A failure on a delete means the object is gone from Neutron but not
        # from the controller. Set the consistency hash to a bad value to
        # trigger a sync on the next check.
        # NOTE: The hash must have a comma in it otherwise it will be ignored
        # by the backend.
        if action == 'DELETE':
            hash_handler.put_hash('INCONSISTENT,INCONSISTENT')
        # All servers failed, reset server list and try again next time
        LOG.error('ServerProxy: %(action)s failure for all servers: '
                  '%(server)r',
                  {'action': action,
                   'server': tuple((s.server,
                                    s.port) for s in self.servers)})
        return first_response
Example #8
 def get_connection_source(self):
     source = {}
     source['tenant'] = Util.format_resource_name(self.src_tenant_name)
     source['segment'] = Util.format_resource_name(self.src_segment_name)
     source['ip'] = self.src_ip
     return source
Example #9
 def get_connection_source(self):
     source = {}
     source['tenant'] = Util.format_resource_name(self.src_tenant_name)
     source['segment'] = Util.format_resource_name(self.src_segment_name)
     source['ip'] = self.src_ip
     return source
Example #10
    def rest_call(self,
                  action,
                  resource,
                  data,
                  headers,
                  ignore_codes,
                  timeout=False):
        context = self.get_context_ref()
        if context:
            # include the requesting context information if available
            cdict = context.to_dict()
            # remove the auth token so it's not present in debug logs on the
            # backend controller
            cdict.pop('auth_token', None)
            if ('tenant_name' in cdict and cdict['tenant_name']):
                cdict['tenant_name'] = Util.format_resource_name(
                    cdict['tenant_name'])
            headers[REQ_CONTEXT_HEADER] = jsonutils.dumps(cdict)
        hash_handler = cdb.HashHandler()
        good_first = sorted(self.servers, key=lambda x: x.failed)
        first_response = None
        for active_server in good_first:
            LOG.debug(
                "ServerProxy: %(action)s to servers: "
                "%(server)r, %(resource)s", {
                    'action': action,
                    'server': (active_server.server, active_server.port),
                    'resource': resource
                })
            for x in range(HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT + 1):
                ret = active_server.rest_call(action,
                                              resource,
                                              data,
                                              headers,
                                              timeout,
                                              reconnect=self.always_reconnect,
                                              hash_handler=hash_handler)
                if ret[0] != httplib.SERVICE_UNAVAILABLE:
                    break
                eventlet.sleep(HTTP_SERVICE_UNAVAILABLE_RETRY_INTERVAL)

            # If inconsistent, do a full synchronization
            if ret[0] == httplib.CONFLICT and hash_handler.is_db_lock_owner():
                if not self.get_topo_function:
                    raise cfg.Error(
                        _('Server requires synchronization, '
                          'but no topology function was defined.'))

                LOG.info(
                    "ServerProxy: HashConflict detected with request "
                    "%(action)s %(resource)s Starting Topology sync", {
                        'action': action,
                        'resource': resource
                    })
                topo_hh = self.dblock_mark_toposync_started(hash_handler)
                try:
                    data = self.get_topo_function(
                        **self.get_topo_function_args)
                    if data:
                        ret_ts = active_server.rest_call('POST',
                                                         TOPOLOGY_PATH,
                                                         data,
                                                         timeout=None,
                                                         hash_handler=topo_hh)
                        if self.server_failure(ret_ts, ignore_codes):
                            LOG.error("ServerProxy: Topology sync failed")
                            raise RemoteRestError(reason=ret_ts[2],
                                                  status=ret_ts[0])
                finally:
                    LOG.info("ServerProxy: Topology sync completed")
                    if data is None:
                        return None
            elif ret[0] == httplib.CONFLICT and \
                    not hash_handler.is_db_lock_owner():
                # DB lock ownership lost, allow current owner to detect hash
                # conflict and perform needed TopoSync
                LOG.warning("HashConflict detected but thread is no longer"
                            " DB lock owner. Skipping TopoSync call")

            # Store the first response as the error to be bubbled up to the
            # user since it was a good server. Subsequent servers will most
            # likely be cluster slaves and won't have a useful error for the
            # user (e.g. 302 redirect to master)
            if not first_response:
                first_response = ret
            if not self.server_failure(ret, ignore_codes):
                active_server.failed = False
                LOG.debug(
                    "ServerProxy: %(action)s succeed for servers: "
                    "%(server)r Response: %(response)s", {
                        'action': action,
                        'server': (active_server.server, active_server.port),
                        'response': ret[3]
                    })
                return ret
            else:
                LOG.warning(
                    'ServerProxy: %(action)s failure for servers:'
                    '%(server)r Response: %(response)s', {
                        'action': action,
                        'server': (active_server.server, active_server.port),
                        'response': ret[3]
                    })
                LOG.warning(
                    "ServerProxy: Error details: "
                    "status=%(status)d, reason=%(reason)r, "
                    "ret=%(ret)s, data=%(data)r", {
                        'status': ret[0],
                        'reason': ret[1],
                        'ret': ret[2],
                        'data': ret[3]
                    })
                active_server.failed = True

        # A failure on a delete means the object is gone from Neutron but not
        # from the controller. Set the consistency hash to a bad value to
        # trigger a sync on the next check.
        # NOTE: The hash must have a comma in it otherwise it will be ignored
        # by the backend.
        if action == 'DELETE':
            hash_handler.put_hash('INCONSISTENT,INCONSISTENT')
        # All servers failed, reset server list and try again next time
        LOG.error(
            'ServerProxy: %(action)s failure for all servers: '
            '%(server)r', {
                'action': action,
                'server': tuple((s.server, s.port) for s in self.servers)
            })
        return first_response
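Both rest_call variants wrap each server call in the same retry loop: keep retrying while the server answers 503 (Service Unavailable), sleeping between attempts, and stop as soon as any other status comes back. The loop in isolation, with illustrative constants and a callable standing in for active_server.rest_call:

# The 503-retry loop used by rest_call, in isolation. The constants and
# the `call` argument are illustrative, not the plugin's own.
import time

HTTP_SERVICE_UNAVAILABLE = 503
RETRY_COUNT = 3       # stand-in for HTTP_SERVICE_UNAVAILABLE_RETRY_COUNT
RETRY_INTERVAL = 3    # stand-in for the retry interval, in seconds

def call_with_503_retry(call):
    """Retry `call` while it returns 503, then hand back the last result."""
    for _ in range(RETRY_COUNT + 1):
        ret = call()
        if ret[0] != HTTP_SERVICE_UNAVAILABLE:
            break
        time.sleep(RETRY_INTERVAL)
    return ret

# Example: a fake call that succeeds immediately.
print(call_with_503_retry(lambda: (200, 'OK', '', '{}')))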