Example #1
def main():
    config.init(app.common_opts, sys.argv[1:])
    application = app.setup_app()

    host = CONF.bind_host
    port = CONF.bind_port
    workers = CONF.api_workers

    if workers < 1:
        LOG.warning(_LW("Wrong worker number, worker = %(workers)s"), workers)
        workers = 1

    LOG.info(_LI("Cinder_APIGW on http://%(host)s:%(port)s with %(workers)s"),
             {
                 'host': host,
                 'port': port,
                 'workers': workers
             })

    service = wsgi.Server(CONF, 'Tricircle Cinder_APIGW', application, host,
                          port)
    restapp.serve(service, CONF, workers)

    LOG.info(_LI("Configuration:"))
    CONF.log_opt_values(LOG, std_logging.INFO)

    restapp.wait()
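
The worker-count warning in this and the following examples formats a named placeholder, so the argument has to be a mapping. A minimal standalone sketch of that rule using only the standard logging module (the _LW translation marker and oslo.log are intentionally left out here):

import logging

logging.basicConfig(level=logging.WARNING)
LOG = logging.getLogger(__name__)

workers = 0
# %(workers)s is a named placeholder, so the argument must be a dict;
# passing the bare integer would fail with "format requires a mapping"
# when the record is emitted.
LOG.warning("Wrong worker number, worker = %(workers)s", {'workers': workers})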
Example #2
 def _get_compute_node(self, context):
     """Returns compute node for the host and nodename."""
     try:
         return objects.ComputeNode.get_by_host_and_nodename(
             context, self.host, utils.get_node_name(self.host))
     except exception.NotFound:
         LOG.warning(_LW("No compute node record for %(host)s:%(node)s"),
                     {'host': self.host,
                      'node': utils.get_node_name(self.host)})
Example #3
 def wrapped(*args, **kwargs):
     while True:
         try:
             return f(*args, **kwargs)
         except db_exc.DBDeadlock:
             LOG.warning(_LW("Deadlock detected when running "
                             "'%(func_name)s': Retrying..."),
                         dict(func_name=f.__name__))
             # Retry!
             time.sleep(0.5)
             continue
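
wrapped above is the inner function of a retry decorator. A hedged sketch of the surrounding decorator (the name retry_on_deadlock and the use of functools.wraps are assumptions, not taken from the listing; the translation marker is omitted):

import functools
import time

from oslo_db import exception as db_exc
from oslo_log import log as logging

LOG = logging.getLogger(__name__)


def retry_on_deadlock(f):
    """Retry forever when the wrapped DB operation hits a deadlock (sketch)."""
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        while True:
            try:
                return f(*args, **kwargs)
            except db_exc.DBDeadlock:
                LOG.warning("Deadlock detected when running '%(func_name)s': "
                            "Retrying...", dict(func_name=f.__name__))
                time.sleep(0.5)
    return wrapped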
Example #4
 def wrapped(*args, **kwargs):
     while True:
         try:
             return f(*args, **kwargs)
         except db_exc.DBDeadlock:
             LOG.warning(_LW("Deadlock detected when running "
                             "'%(func_name)s': Retrying..."),
                         dict(func_name=f.__name__))
             # Retry!
             time.sleep(0.5)
             continue
Example #5
    def _prep_block_device(self, context, instance, bdms,
                           do_check_attach=True):
        """Set up the block device for an instance with error logging."""
        try:
            block_device_info = {
                'root_device_name': instance['root_device_name'],
                'swap': driver_block_device.convert_swap(bdms),
                'ephemerals': driver_block_device.convert_ephemerals(bdms),
                'block_device_mapping': (
                    driver_block_device.attach_block_devices(
                        driver_block_device.convert_volumes(bdms),
                        context, instance, self.volume_api,
                        self.driver, do_check_attach=do_check_attach) +
                    driver_block_device.attach_block_devices(
                        driver_block_device.convert_snapshots(bdms),
                        context, instance, self.volume_api,
                        self.driver, self._await_block_device_map_created,
                        do_check_attach=do_check_attach) +
                    driver_block_device.attach_block_devices(
                        driver_block_device.convert_images(bdms),
                        context, instance, self.volume_api,
                        self.driver, self._await_block_device_map_created,
                        do_check_attach=do_check_attach) +
                    driver_block_device.attach_block_devices(
                        driver_block_device.convert_blanks(bdms),
                        context, instance, self.volume_api,
                        self.driver, self._await_block_device_map_created,
                        do_check_attach=do_check_attach))
            }

            if self.use_legacy_block_device_info:
                for bdm_type in ('swap', 'ephemerals', 'block_device_mapping'):
                    block_device_info[bdm_type] = \
                        driver_block_device.legacy_block_devices(
                            block_device_info[bdm_type])

            # Get swap out of the list
            block_device_info['swap'] = driver_block_device.get_swap(
                block_device_info['swap'])
            return block_device_info

        except exception.OverQuota:
            msg = _LW('Failed to create block device for instance due to '
                      'being over volume resource quota')
            LOG.warn(msg, instance=instance)
            raise exception.InvalidBDM()

        except Exception:
            LOG.exception(_LE('Instance failed block device setup'),
                          instance=instance)
            raise exception.InvalidBDM()
Example #6
 def handle_args(*args, **kwargs):
     instance, resource, context = args[:3]
     if resource not in instance.operation_resources_map[
             operation_name]:
         raise exceptions.ResourceNotSupported(resource, operation_name)
     retries = 1
     for i in xrange(retries + 1):
         try:
             service = instance.resource_service_map[resource]
             instance._ensure_endpoint_set(context, service)
             return func(*args, **kwargs)
         except exceptions.EndpointNotAvailable as e:
             if i == retries:
                 raise
             if cfg.CONF.client.auto_refresh_endpoint:
                 LOG.warning(
                     _LW('%(exception)s, '
                         'update endpoint and try again'),
                     {'exception': e.message})
                 instance._update_endpoint_from_keystone(context, True)
             else:
                 raise
         except exceptions.EndpointNotFound as e:
             # NOTE(zhiyuan) endpoints are not registered in Keystone
             # for the given pod and service, we add default behaviours
             # for the handle functions
             if i < retries and cfg.CONF.client.auto_refresh_endpoint:
                 LOG.warning(
                     _LW('%(exception)s, '
                         'update endpoint and try again'),
                     {'exception': e.message})
                 instance._update_endpoint_from_keystone(context, True)
                 continue
             if operation_name == 'list':
                 return []
             else:
                 return None
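
handle_args above retries exactly once after refreshing the endpoint cache. The same control flow, stripped of the tricircle specifics (the exception class and the callbacks below are stand-ins, not the real tricircle ones):

class EndpointNotAvailable(Exception):
    pass


def call_with_refresh(call, refresh, retries=1):
    """Call call(); on an endpoint error, run refresh() and retry up to
    `retries` times before giving up."""
    for i in range(retries + 1):
        try:
            return call()
        except EndpointNotAvailable:
            if i == retries:
                raise
            refresh()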
Example #7
    def _cleanup_volumes(self, context, instance_uuid, bdms, raise_exc=True):
        exc_info = None

        for bdm in bdms:
            LOG.debug("terminating bdm %s", bdm,
                      instance_uuid=instance_uuid)
            if bdm.volume_id and bdm.delete_on_termination:
                try:
                    self.volume_api.delete(context, bdm.volume_id)
                except Exception as exc:
                    exc_info = sys.exc_info()
                    LOG.warn(_LW('Failed to delete volume: %(volume_id)s due '
                                 'to %(exc)s'), {'volume_id': bdm.volume_id,
                                                 'exc': unicode(exc)})
        if exc_info is not None and raise_exc:
            six.reraise(exc_info[0], exc_info[1], exc_info[2])
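
_cleanup_volumes defers the re-raise so that one failed volume deletion does not stop the remaining deletions. The same capture-and-reraise idiom in isolation (the function names here are illustrative, not from the listing):

import sys

import six


def cleanup_all(items, delete):
    """Run delete() on every item; re-raise the last failure at the end."""
    exc_info = None
    for item in items:
        try:
            delete(item)
        except Exception:
            # remember the failure but keep cleaning up the rest
            exc_info = sys.exc_info()
    if exc_info is not None:
        six.reraise(exc_info[0], exc_info[1], exc_info[2])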
Example #8
def main():
    config.init(xservice.common_opts, sys.argv[1:])

    host = CONF.host
    workers = CONF.workers

    if workers < 1:
        LOG.warning(_LW("Wrong worker number, worker = %(workers)s"), workers)
        workers = 1

    LOG.info(_LI("XJob Server on http://%(host)s with %(workers)s"),
             {'host': host, 'workers': workers})

    xservice.serve(xservice.create_service(), workers)

    LOG.info(_LI("Configuration:"))
    CONF.log_opt_values(LOG, std_logging.INFO)

    xservice.wait()
Example #9
def main():
    config.init(xservice.common_opts, sys.argv[1:])

    host = CONF.host
    workers = CONF.workers

    if workers < 1:
        LOG.warning(_LW("Wrong worker number, worker = %(workers)s"), workers)
        workers = 1

    LOG.info(_LI("XJob Server on http://%(host)s with %(workers)s"), {
        'host': host,
        'workers': workers
    })

    xservice.serve(xservice.create_service(), workers)

    LOG.info(_LI("Configuration:"))
    CONF.log_opt_values(LOG, logging.INFO)

    xservice.wait()
Example #10
def main():
    config.init(sys.argv[1:])
    config.setup_logging()
    application = app.setup_app()

    host = CONF.bind_host
    port = CONF.bind_port
    workers = CONF.api_workers

    if workers < 1:
        LOG.warning(_LW("Wrong worker number, worker = %(workers)s"), workers)
        workers = 1

    LOG.info(_LI("Server on http://%(host)s:%(port)s with %(workers)s"),
             {'host': host, 'port': port, 'workers': workers})

    serving.run_simple(host, port,
                       application,
                       processes=workers)

    LOG.info(_LI("Configuration:"))
    CONF.log_opt_values(LOG, std_logging.INFO)
Example #11
def main():
    config.init(app.common_opts, sys.argv[1:])
    application = app.setup_app()

    host = CONF.bind_host
    port = CONF.bind_port
    workers = CONF.api_workers

    if workers < 1:
        LOG.warning(_LW("Wrong worker number, worker = %(workers)s"), workers)
        workers = 1

    LOG.info(_LI("Admin API on http://%(host)s:%(port)s with %(workers)s"),
             {'host': host, 'port': port, 'workers': workers})

    service = wsgi.Server(CONF, 'Tricircle Admin_API', application, host, port)
    restapp.serve(service, CONF, workers)

    LOG.info(_LI("Configuration:"))
    CONF.log_opt_values(LOG, std_logging.INFO)

    restapp.wait()
Example #12
    def _setup_router_one_pod(self, ctx, t_pod, b_pod, t_client, t_net,
                              t_router, t_bridge_net, t_bridge_subnet,
                              is_ext_net_pod):
        # NOTE(zhiyuan) after the bridge network combination, external network
        # is attached to a separate router, which is created in central plugin,
        # so is_ext_net_pod is not used in the current implementation, but we
        # choose to keep this parameter since it's an important attribute of a
        # pod and we may need to use it later.
        b_client = self._get_client(b_pod['region_name'])

        is_distributed = t_router.get('distributed', False)
        router_body = {'router': {'name': t_router['id'],
                                  'distributed': is_distributed}}
        project_id = t_router['tenant_id']

        # create bottom router in target bottom pod
        _, b_router_id = self.helper.prepare_bottom_element(
            ctx, project_id, b_pod, t_router, constants.RT_ROUTER, router_body)

        # create top bridge port
        q_ctx = None  # no need to pass neutron context when using client
        t_bridge_port_id = self.helper.get_bridge_interface(
            ctx, q_ctx, project_id, t_pod, t_bridge_net['id'], b_router_id)

        # create bottom bridge port
        # if target bottom pod is hosting real external network, we create
        # another bottom router and attach the bridge network as internal
        # network, but this work is done by central plugin when user sets
        # router gateway.
        t_bridge_port = t_client.get_ports(ctx, t_bridge_port_id)
        (is_new, b_bridge_port_id, b_bridge_subnet_id,
         b_bridge_net_id) = self.helper.get_bottom_bridge_elements(
            ctx, project_id, b_pod, t_bridge_net, True, t_bridge_subnet, None)

        # we attach the bridge port as router gateway
        # add_gateway is update operation, which can run multiple times
        gateway_ip = t_bridge_port['fixed_ips'][0]['ip_address']
        b_client.action_routers(
            ctx, 'add_gateway', b_router_id,
            {'network_id': b_bridge_net_id,
             'enable_snat': False,
             'external_fixed_ips': [{'subnet_id': b_bridge_subnet_id,
                                     'ip_address': gateway_ip}]})

        # attach internal port to bottom router
        t_ports = self._get_router_interfaces(t_client, ctx, t_router['id'],
                                              t_net['id'])
        b_net_id = db_api.get_bottom_id_by_top_id_region_name(
            ctx, t_net['id'], b_pod['region_name'], constants.RT_NETWORK)
        if b_net_id:
            b_ports = self._get_router_interfaces(b_client, ctx, b_router_id,
                                                  b_net_id)
        else:
            b_ports = []
        if not t_ports and b_ports:
            # remove redundant bottom interface
            b_port = b_ports[0]
            request_body = {'port_id': b_port['id']}
            b_client.action_routers(ctx, 'remove_interface', b_router_id,
                                    request_body)
        elif t_ports and not b_ports:
            # create new bottom interface
            t_port = t_ports[0]

            # only consider ipv4 address currently
            t_subnet_id = t_port['fixed_ips'][0]['subnet_id']
            t_subnet = t_client.get_subnets(ctx, t_subnet_id)

            if CONF.enable_api_gateway:
                (b_net_id,
                 subnet_map) = self.helper.prepare_bottom_network_subnets(
                    ctx, q_ctx, project_id, b_pod, t_net, [t_subnet])
            else:
                (b_net_id,
                 subnet_map) = (t_net['id'], {t_subnet['id']: t_subnet['id']})

            # the gateway ip of bottom subnet is set to the ip of t_port, so
            # we just attach the bottom subnet to the bottom router and neutron
            # server in the bottom pod will create the interface for us, using
            # the gateway ip.
            b_client.action_routers(ctx, 'add_interface', b_router_id,
                                    {'subnet_id': subnet_map[t_subnet_id]})

        if not t_router['external_gateway_info']:
            return

        # handle floatingip
        t_ext_net_id = t_router['external_gateway_info']['network_id']
        t_fips = t_client.list_floatingips(ctx, [{'key': 'floating_network_id',
                                                  'comparator': 'eq',
                                                  'value': t_ext_net_id}])
        # skip unbound top floatingip
        t_ip_fip_map = dict([(fip['floating_ip_address'],
                              fip) for fip in t_fips if fip['port_id']])
        mappings = db_api.get_bottom_mappings_by_top_id(ctx, t_ext_net_id,
                                                        constants.RT_NETWORK)
        # bottom external network should exist
        b_ext_pod, b_ext_net_id = mappings[0]
        b_ext_client = self._get_client(b_ext_pod['region_name'])
        b_fips = b_ext_client.list_floatingips(
            ctx, [{'key': 'floating_network_id', 'comparator': 'eq',
                   'value': b_ext_net_id}])
        b_ip_fip_map = dict([(fip['floating_ip_address'],
                              fip) for fip in b_fips])
        add_fips = [ip for ip in t_ip_fip_map if ip not in b_ip_fip_map]
        del_fips = [ip for ip in b_ip_fip_map if ip not in t_ip_fip_map]

        for add_fip in add_fips:
            fip = t_ip_fip_map[add_fip]
            t_int_port_id = fip['port_id']
            b_int_port_id = db_api.get_bottom_id_by_top_id_region_name(
                ctx, t_int_port_id, b_pod['region_name'], constants.RT_PORT)
            if not b_int_port_id:
                LOG.warning(_LW('Port %(port_id)s associated with floating ip '
                                '%(fip)s is not mapped to bottom pod'),
                            {'port_id': t_int_port_id, 'fip': add_fip})
                continue
            t_int_port = t_client.get_ports(ctx, t_int_port_id)
            if t_int_port['network_id'] != t_net['id']:
                # only handle floating ip association for the given top network
                continue

            if b_ext_pod['pod_id'] != b_pod['pod_id']:
                # if the internal port is not located in the external network
                # pod, we need to create a copied port in that pod for floating
                # ip association purpose
                t_int_net_id = t_int_port['network_id']
                t_int_subnet_id = t_int_port['fixed_ips'][0]['subnet_id']
                port_body = {
                    'port': {
                        'tenant_id': project_id,
                        'admin_state_up': True,
                        'name': constants.shadow_port_name % t_int_port['id'],
                        'network_id': t_int_net_id,
                        'fixed_ips': [{'ip_address': t_int_port[
                            'fixed_ips'][0]['ip_address']}]
                    }
                }
                self.helper.prepare_bottom_element(
                    ctx, project_id, b_ext_pod, t_int_port,
                    constants.RT_SD_PORT, port_body)
                # create routing entries for copied network and subnet so we
                # can easily find them during central network and subnet
                # deletion, create_resource_mapping will catch DBDuplicateEntry
                # exception and ignore it so it's safe to call this function
                # multiple times
                db_api.create_resource_mapping(ctx, t_int_net_id, t_int_net_id,
                                               b_ext_pod['pod_id'], project_id,
                                               constants.RT_SD_NETWORK)
                db_api.create_resource_mapping(ctx, t_int_subnet_id,
                                               t_int_subnet_id,
                                               b_ext_pod['pod_id'], project_id,
                                               constants.RT_SD_SUBNET)

            self._safe_create_bottom_floatingip(
                ctx, b_pod, b_ext_client, b_ext_net_id, add_fip,
                b_int_port_id)

        for del_fip in del_fips:
            fip = b_ip_fip_map[del_fip]
            if b_ext_pod['pod_id'] != b_pod['pod_id'] and fip['port_id']:
                # expire the routing entry for copy port
                with ctx.session.begin():
                    core.update_resources(
                        ctx, models.ResourceRouting,
                        [{'key': 'bottom_id', 'comparator': 'eq',
                          'value': fip['port_id']},
                         {'key': 'resource_type', 'comparator': 'eq',
                          'value': constants.RT_SD_PORT}],
                        {'bottom_id': None,
                         'created_at': constants.expire_time,
                         'updated_at': constants.expire_time})
                # delete copy port
                b_ext_client.delete_ports(ctx, fip['port_id'])
                # delete the expired entry, even if this deletion fails, we
                # still have a chance that lock_handle module will delete it
                with ctx.session.begin():
                    core.delete_resources(ctx, models.ResourceRouting,
                                          [{'key': 'top_id',
                                            'comparator': 'eq',
                                            'value': fip['port_id']},
                                           {'key': 'resource_type',
                                            'comparator': 'eq',
                                            'value': constants.RT_SD_PORT}])
                    # delete port before floating ip disassociation, copy
                    # network and copy subnet are deleted during central
                    # network and subnet deletion
            b_ext_client.delete_floatingips(ctx, fip['id'])
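
The floating-ip sync in this example reduces to a set difference between the top and bottom ip-to-floatingip maps. In isolation, with made-up values:

t_ip_fip_map = {'203.0.113.10': {'port_id': 'p1'}}
b_ip_fip_map = {'203.0.113.11': {'port_id': 'p2'}}

add_fips = [ip for ip in t_ip_fip_map if ip not in b_ip_fip_map]
del_fips = [ip for ip in b_ip_fip_map if ip not in t_ip_fip_map]
# add_fips == ['203.0.113.10'], del_fips == ['203.0.113.11']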
Example #13
        def handle_args(*args, **kwargs):
            if IN_TEST:
                # NOTE(zhiyuan) job mechanism will cause some unpredictable
                # result in unit test so we would like to bypass it. However
                # we have problem mocking a decorator which decorates member
                # functions, that's why we use this label, not an elegant
                # way though.
                func(*args, **kwargs)
                return
            ctx = args[1]
            payload = kwargs['payload']

            resource_id = payload[job_type]
            db_api.new_job(ctx, job_type, resource_id)
            start_time = datetime.datetime.now()

            while True:
                current_time = datetime.datetime.now()
                delta = current_time - start_time
                if delta.seconds >= CONF.worker_handle_timeout:
                    # quit when this handle is running for a long time
                    break
                time_new = db_api.get_latest_timestamp(ctx, constants.JS_New,
                                                       job_type, resource_id)
                time_success = db_api.get_latest_timestamp(
                    ctx, constants.JS_Success, job_type, resource_id)
                if time_success and time_success >= time_new:
                    break
                job = db_api.register_job(ctx, job_type, resource_id)
                if not job:
                    # fail to obtain the lock, let other worker handle the job
                    running_job = db_api.get_running_job(ctx, job_type,
                                                         resource_id)
                    if not running_job:
                        # there are two reasons that running_job is None. one
                        # is that the running job has just been finished, the
                        # other is that all workers fail to register the job
                        # due to deadlock exception. so we sleep and try again
                        eventlet.sleep(CONF.worker_sleep_time)
                        continue
                    job_time = running_job['timestamp']
                    current_time = datetime.datetime.now()
                    delta = current_time - job_time
                    if delta.seconds > CONF.job_run_expire:
                        # previous running job expires, we set its status to
                        # fail and try again to obtain the lock
                        db_api.finish_job(ctx, running_job['id'], False,
                                          time_new)
                        LOG.warning(_LW('Job %(job)s of type %(job_type)s for '
                                        'resource %(resource)s expires, set '
                                        'its state to Fail'),
                                    {'job': running_job['id'],
                                     'job_type': job_type,
                                     'resource': resource_id})
                        eventlet.sleep(CONF.worker_sleep_time)
                        continue
                    else:
                        # previous running job is still valid, we just leave
                        # the job to the worker who holds the lock
                        break
                # successfully obtain the lock, start to execute handler
                try:
                    func(*args, **kwargs)
                except Exception:
                    db_api.finish_job(ctx, job['id'], False, time_new)
                    LOG.error(_LE('Job %(job)s of type %(job_type)s for '
                                  'resource %(resource)s fails'),
                              {'job': job['id'],
                               'job_type': job_type,
                               'resource': resource_id})
                    break
                db_api.finish_job(ctx, job['id'], True, time_new)
                eventlet.sleep(CONF.worker_sleep_time)
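
The outer while loop above is bounded by CONF.worker_handle_timeout. The timeout bookkeeping on its own, as a small standalone helper (the names and the use of time.sleep instead of eventlet are assumptions made for this sketch):

import datetime
import time


def poll_until(check, handle_timeout=60, sleep_time=1):
    """Call check() until it returns True or handle_timeout seconds pass."""
    start_time = datetime.datetime.now()
    while True:
        delta = datetime.datetime.now() - start_time
        if delta.total_seconds() >= handle_timeout:
            return False  # quit when the handler has been running too long
        if check():
            return True
        time.sleep(sleep_time)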
Example #14
        def handle_args(*args, **kwargs):
            if IN_TEST:
                # NOTE(zhiyuan) job mechanism will cause some unpredictable
                # result in unit test so we would like to bypass it. However
                # we have problem mocking a decorator which decorates member
                # functions, that's why we use this label, not an elegant
                # way though.
                func(*args, **kwargs)
                return
            ctx = args[1]
            payload = kwargs['payload']

            resource_id = payload[job_type]
            db_api.new_job(ctx, job_type, resource_id)
            start_time = datetime.datetime.now()

            while True:
                current_time = datetime.datetime.now()
                delta = current_time - start_time
                if delta.seconds >= CONF.worker_handle_timeout:
                    # quit when this handle is running for a long time
                    break
                time_new = db_api.get_latest_timestamp(ctx, constants.JS_New,
                                                       job_type, resource_id)
                time_success = db_api.get_latest_timestamp(
                    ctx, constants.JS_Success, job_type, resource_id)
                if time_success and time_success >= time_new:
                    break
                job = db_api.register_job(ctx, job_type, resource_id)
                if not job:
                    # fail to obtain the lock, let other worker handle the job
                    running_job = db_api.get_running_job(ctx, job_type,
                                                         resource_id)
                    if not running_job:
                        # there are two reasons that running_job is None. one
                        # is that the running job has just been finished, the
                        # other is that all workers fail to register the job
                        # due to deadlock exception. so we sleep and try again
                        eventlet.sleep(CONF.worker_sleep_time)
                        continue
                    job_time = running_job['timestamp']
                    current_time = datetime.datetime.now()
                    delta = current_time - job_time
                    if delta.seconds > CONF.job_run_expire:
                        # previous running job expires, we set its status to
                        # fail and try again to obtain the lock
                        db_api.finish_job(ctx, running_job['id'], False,
                                          time_new)
                        LOG.warning(_LW('Job %(job)s of type %(job_type)s for '
                                        'resource %(resource)s expires, set '
                                        'its state to Fail'),
                                    {'job': running_job['id'],
                                     'job_type': job_type,
                                     'resource': resource_id})
                        eventlet.sleep(CONF.worker_sleep_time)
                        continue
                    else:
                        # previous running job is still valid, we just leave
                        # the job to the worker who holds the lock
                        break
                # successfully obtain the lock, start to execute handler
                try:
                    func(*args, **kwargs)
                except Exception:
                    db_api.finish_job(ctx, job['id'], False, time_new)
                    LOG.error(_LE('Job %(job)s of type %(job_type)s for '
                                  'resource %(resource)s fails'),
                              {'job': job['id'],
                               'job_type': job_type,
                               'resource': resource_id})
                    break
                db_api.finish_job(ctx, job['id'], True, time_new)
                eventlet.sleep(CONF.worker_sleep_time)
Example #15
    def _setup_router_one_pod(self, ctx, t_pod, b_pod, t_client, t_net,
                              t_router, t_ew_bridge_net, t_ew_bridge_subnet,
                              need_ns_bridge):
        b_client = self._get_client(b_pod['pod_name'])

        router_body = {'router': {'name': t_router['id'],
                                  'distributed': False}}
        project_id = t_router['tenant_id']

        # create bottom router in target bottom pod
        _, b_router_id = self.helper.prepare_bottom_element(
            ctx, project_id, b_pod, t_router, 'router', router_body)

        # handle E-W networking
        # create top E-W bridge port
        q_ctx = None  # no need to pass neutron context when using client
        t_ew_bridge_port_id = self.helper.get_bridge_interface(
            ctx, q_ctx, project_id, t_pod, t_ew_bridge_net['id'],
            b_router_id, None, True)

        # create bottom E-W bridge port
        t_ew_bridge_port = t_client.get_ports(ctx, t_ew_bridge_port_id)
        (is_new, b_ew_bridge_port_id,
         _, _) = self.helper.get_bottom_bridge_elements(
            ctx, project_id, b_pod, t_ew_bridge_net, False, t_ew_bridge_subnet,
            t_ew_bridge_port)

        # attach bottom E-W bridge port to bottom router
        if is_new:
            # only attach bridge port the first time
            b_client.action_routers(ctx, 'add_interface', b_router_id,
                                    {'port_id': b_ew_bridge_port_id})
        else:
            # still need to check if the bridge port is bound
            port = b_client.get_ports(ctx, b_ew_bridge_port_id)
            if not port.get('device_id'):
                b_client.action_routers(ctx, 'add_interface', b_router_id,
                                        {'port_id': b_ew_bridge_port_id})

        # handle N-S networking
        if need_ns_bridge:
            t_ns_bridge_net_name = constants.ns_bridge_net_name % project_id
            t_ns_bridge_subnet_name = constants.ns_bridge_subnet_name % (
                project_id)
            t_ns_bridge_net = self._get_resource_by_name(
                t_client, ctx, 'network', t_ns_bridge_net_name)
            t_ns_bridge_subnet = self._get_resource_by_name(
                t_client, ctx, 'subnet', t_ns_bridge_subnet_name)
            # create bottom N-S bridge network and subnet
            (_, _, b_ns_bridge_subnet_id,
             b_ns_bridge_net_id) = self.helper.get_bottom_bridge_elements(
                ctx, project_id, b_pod, t_ns_bridge_net, True,
                t_ns_bridge_subnet, None)
            # create top N-S bridge gateway port
            t_ns_bridge_gateway_id = self.helper.get_bridge_interface(
                ctx, q_ctx, project_id, t_pod, t_ns_bridge_net['id'],
                b_router_id, None, False)
            t_ns_bridge_gateway = t_client.get_ports(ctx,
                                                     t_ns_bridge_gateway_id)
            # add external gateway for bottom router
            # add gateway is update operation, can run multiple times
            gateway_ip = t_ns_bridge_gateway['fixed_ips'][0]['ip_address']
            b_client.action_routers(
                ctx, 'add_gateway', b_router_id,
                {'network_id': b_ns_bridge_net_id,
                 'external_fixed_ips': [{'subnet_id': b_ns_bridge_subnet_id,
                                         'ip_address': gateway_ip}]})

        # attach internal port to bottom router
        t_ports = self._get_router_interfaces(t_client, ctx, t_router['id'],
                                              t_net['id'])
        b_net_id = db_api.get_bottom_id_by_top_id_pod_name(
            ctx, t_net['id'], b_pod['pod_name'], constants.RT_NETWORK)
        if b_net_id:
            b_ports = self._get_router_interfaces(b_client, ctx, b_router_id,
                                                  b_net_id)
        else:
            b_ports = []
        if not t_ports and b_ports:
            # remove redundant bottom interface
            b_port = b_ports[0]
            request_body = {'port_id': b_port['id']}
            b_client.action_routers(ctx, 'remove_interface', b_router_id,
                                    request_body)
        elif t_ports and not b_ports:
            # create new bottom interface
            t_port = t_ports[0]

            # only consider ipv4 address currently
            t_subnet_id = t_port['fixed_ips'][0]['subnet_id']
            t_subnet = t_client.get_subnets(ctx, t_subnet_id)

            if CONF.enable_api_gateway:
                (b_net_id,
                 subnet_map) = self.helper.prepare_bottom_network_subnets(
                    ctx, q_ctx, project_id, b_pod, t_net, [t_subnet])
            else:
                (b_net_id,
                 subnet_map) = (t_net['id'], {t_subnet['id']: t_subnet['id']})

            # the gateway ip of bottom subnet is set to the ip of t_port, so
            # we just attach the bottom subnet to the bottom router and neutron
            # server in the bottom pod will create the interface for us, using
            # the gateway ip.
            b_client.action_routers(ctx, 'add_interface', b_router_id,
                                    {'subnet_id': subnet_map[t_subnet_id]})

        if not t_router['external_gateway_info']:
            return

        # handle floatingip
        t_ext_net_id = t_router['external_gateway_info']['network_id']
        t_fips = t_client.list_floatingips(ctx, [{'key': 'floating_network_id',
                                                  'comparator': 'eq',
                                                  'value': t_ext_net_id}])
        # skip unbound top floatingip
        t_ip_fip_map = dict([(fip['floating_ip_address'],
                              fip) for fip in t_fips if fip['port_id']])
        mappings = db_api.get_bottom_mappings_by_top_id(ctx, t_ext_net_id,
                                                        constants.RT_NETWORK)
        # bottom external network should exist
        b_ext_pod, b_ext_net_id = mappings[0]
        b_ext_client = self._get_client(b_ext_pod['pod_name'])
        b_fips = b_ext_client.list_floatingips(
            ctx, [{'key': 'floating_network_id', 'comparator': 'eq',
                   'value': b_ext_net_id}])
        b_ip_fip_map = dict([(fip['floating_ip_address'],
                              fip) for fip in b_fips])
        add_fips = [ip for ip in t_ip_fip_map if ip not in b_ip_fip_map]
        del_fips = [ip for ip in b_ip_fip_map if ip not in t_ip_fip_map]

        for add_fip in add_fips:
            fip = t_ip_fip_map[add_fip]
            t_int_port_id = fip['port_id']
            b_int_port_id = db_api.get_bottom_id_by_top_id_pod_name(
                ctx, t_int_port_id, b_pod['pod_name'], constants.RT_PORT)
            if not b_int_port_id:
                LOG.warning(_LW('Port %(port_id)s associated with floating ip '
                                '%(fip)s is not mapped to bottom pod'),
                            {'port_id': t_int_port_id, 'fip': add_fip})
                continue
            t_int_port = t_client.get_ports(ctx, t_int_port_id)
            if t_int_port['network_id'] != t_net['id']:
                # only handle floating ip association for the given top network
                continue
            if need_ns_bridge:
                # create top N-S bridge interface port
                t_ns_bridge_port_id = self.helper.get_bridge_interface(
                    ctx, q_ctx, project_id, t_pod, t_ns_bridge_net['id'], None,
                    b_int_port_id, False)
                t_ns_bridge_port = t_client.get_ports(ctx, t_ns_bridge_port_id)
                b_ext_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name(
                    ctx, t_ns_bridge_net['id'], b_ext_pod['pod_name'],
                    constants.RT_NETWORK)
                port_body = {
                    'port': {
                        'tenant_id': project_id,
                        'admin_state_up': True,
                        'name': 'ns_bridge_port',
                        'network_id': b_ext_bridge_net_id,
                        'fixed_ips': [{'ip_address': t_ns_bridge_port[
                            'fixed_ips'][0]['ip_address']}]
                    }
                }
                _, b_ns_bridge_port_id = self.helper.prepare_bottom_element(
                    ctx, project_id, b_ext_pod, t_ns_bridge_port,
                    constants.RT_PORT, port_body)
                self._safe_create_bottom_floatingip(
                    ctx, b_ext_pod, b_ext_client, b_ext_net_id, add_fip,
                    b_ns_bridge_port_id)
                self._safe_create_bottom_floatingip(
                    ctx, b_pod, b_client, b_ns_bridge_net_id,
                    t_ns_bridge_port['fixed_ips'][0]['ip_address'],
                    b_int_port_id)
            else:
                self._safe_create_bottom_floatingip(
                    ctx, b_pod, b_client, b_ext_net_id, add_fip,
                    b_int_port_id)

        for del_fip in del_fips:
            fip = b_ip_fip_map[del_fip]
            if not fip['port_id']:
                b_ext_client.delete_floatingips(ctx, fip['id'])
                continue
            if need_ns_bridge:
                b_ns_bridge_port = b_ext_client.get_ports(ctx, fip['port_id'])
                entries = core.query_resource(
                    ctx, models.ResourceRouting,
                    [{'key': 'bottom_id', 'comparator': 'eq',
                      'value': b_ns_bridge_port['id']},
                     {'key': 'pod_id', 'comparator': 'eq',
                      'value': b_ext_pod['pod_id']}], [])
                t_ns_bridge_port_id = entries[0]['top_id']
                b_int_fips = b_client.list_floatingips(
                    ctx,
                    [{'key': 'floating_ip_address',
                      'comparator': 'eq',
                      'value': b_ns_bridge_port['fixed_ips'][0]['ip_address']},
                     {'key': 'floating_network_id',
                      'comparator': 'eq',
                      'value': b_ns_bridge_net_id}])
                if b_int_fips:
                    b_client.delete_floatingips(ctx, b_int_fips[0]['id'])

                # for bridge port, we have two resource routing entries, one
                # for bridge port in top pod, another for bridge port in bottom
                # pod. calling t_client.delete_ports will delete bridge port in
                # bottom pod as well as routing entry for it, but we also need
                # to remove routing entry for bridge port in top pod, bridge
                # network will be deleted when deleting router

                # first we update the routing entry to set bottom_id to None
                # and expire the entry, so if we succeed to delete the bridge
                # port next, this expired entry will be deleted; otherwise, we
                # fail to delete the bridge port, when the port is accessed via
                # lock_handle module, that module will find the port and update
                # the entry
                with ctx.session.begin():
                    core.update_resources(
                        ctx, models.ResourceRouting,
                        [{'key': 'bottom_id', 'comparator': 'eq',
                          'value': t_ns_bridge_port_id}],
                        {'bottom_id': None,
                         'created_at': constants.expire_time,
                         'updated_at': constants.expire_time})
                # delete bridge port
                t_client.delete_ports(ctx, t_ns_bridge_port_id)
                # delete the expired entry, even if this deletion fails, we
                # still have a chance that lock_handle module will delete it
                with ctx.session.begin():
                    core.delete_resources(ctx, models.ResourceRouting,
                                          [{'key': 'bottom_id',
                                            'comparator': 'eq',
                                            'value': t_ns_bridge_port_id}])
            b_ext_client.delete_floatingips(ctx, fip['id'])
Example #16
def quota_reserve(context,
                  resources,
                  quotas,
                  deltas,
                  expire,
                  until_refresh,
                  max_age,
                  project_id=None):
    elevated = context.elevated()
    with context.session.begin():
        if project_id is None:
            project_id = context.project_id

        # Get the current usages
        usages = _get_quota_usages(context, context.session, project_id)

        # Handle usage refresh
        refresh = False
        work = set(deltas.keys())
        while work:
            resource = work.pop()

            # Do we need to refresh the usage?
            if resource not in usages:
                usages[resource] = _quota_usage_create(elevated,
                                                       project_id,
                                                       resource,
                                                       0,
                                                       0,
                                                       until_refresh or None,
                                                       session=context.session)
                refresh = True
            elif usages[resource].in_use < 0:
                # Negative in_use count indicates a desync, so try to
                # heal from that...
                refresh = True
            elif usages[resource].until_refresh is not None:
                usages[resource].until_refresh -= 1
                if usages[resource].until_refresh <= 0:
                    refresh = True
            elif max_age and usages[resource].updated_at is not None and (
                (usages[resource].updated_at - timeutils.utcnow()).seconds >=
                    max_age):
                refresh = True

            if refresh:
                # no actual usage refresh here

                # refresh from the bottom pod
                usages[resource].until_refresh = until_refresh or None

                # Because more than one resource may be refreshed
                # by the call to the sync routine, and we don't
                # want to double-sync, we make sure all refreshed
                # resources are dropped from the work set.
                work.discard(resource)

                # NOTE(Vek): We make the assumption that the sync
                #            routine actually refreshes the
                #            resources that it is the sync routine
                #            for.  We don't check, because this is
                #            a best-effort mechanism.

        # Check for deltas that would go negative
        unders = [
            r for r, delta in deltas.items()
            if delta < 0 and delta + usages[r].in_use < 0
        ]

        # Now, let's check the quotas
        # NOTE(Vek): We're only concerned about positive increments.
        #            If a project has gone over quota, we want them to
        #            be able to reduce their usage without any
        #            problems.
        overs = [
            r for r, delta in deltas.items()
            if quotas[r] >= 0 and delta >= 0 and quotas[r] < delta +
            usages[r].in_use + usages[r].reserved
        ]

        # NOTE(Vek): The quota check needs to be in the transaction,
        #            but the transaction doesn't fail just because
        #            we're over quota, so the OverQuota raise is
        #            outside the transaction.  If we did the raise
        #            here, our usage updates would be discarded, but
        #            they're not invalidated by being over-quota.

        # Create the reservations
        if not overs:
            reservations = []
            for resource, delta in deltas.items():
                reservation = _reservation_create(elevated,
                                                  str(uuid.uuid4()),
                                                  usages[resource],
                                                  project_id,
                                                  resource,
                                                  delta,
                                                  expire,
                                                  session=context.session)
                reservations.append(reservation.uuid)

                # Also update the reserved quantity
                # NOTE(Vek): Again, we are only concerned here about
                #            positive increments.  Here, though, we're
                #            worried about the following scenario:
                #
                #            1) User initiates resize down.
                #            2) User allocates a new instance.
                #            3) Resize down fails or is reverted.
                #            4) User is now over quota.
                #
                #            To prevent this, we only update the
                #            reserved value if the delta is positive.
                if delta > 0:
                    usages[resource].reserved += delta

    if unders:
        LOG.warning(
            _LW("Change will make usage less than 0 for the following "
                "resources: %s"), unders)
    if overs:
        usages = {
            k: dict(in_use=v['in_use'], reserved=v['reserved'])
            for k, v in usages.items()
        }
        raise exceptions.OverQuota(overs=sorted(overs),
                                   quotas=quotas,
                                   usages=usages)

    return reservations
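
The unders/overs classification in quota_reserve is plain arithmetic over the deltas. Restated with simple dicts (illustrative names and numbers only):

def check_deltas(quotas, in_use, reserved, deltas):
    """Classify deltas the same way quota_reserve above does."""
    unders = [r for r, delta in deltas.items()
              if delta < 0 and delta + in_use[r] < 0]
    overs = [r for r, delta in deltas.items()
             if quotas[r] >= 0 and delta >= 0 and
             quotas[r] < delta + in_use[r] + reserved[r]]
    return unders, overs


# quota of 10 volumes, 9 in use, 1 reserved: asking for one more is over quota
print(check_deltas({'volumes': 10}, {'volumes': 9}, {'volumes': 1},
                   {'volumes': 1}))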
Example #17
def quota_reserve(context, resources, quotas, deltas, expire,
                  until_refresh, max_age, project_id=None):
    elevated = context.elevated()
    with context.session.begin():
        if project_id is None:
            project_id = context.project_id

        # Get the current usages
        usages = _get_quota_usages(context, context.session, project_id)

        # Handle usage refresh
        refresh = False
        work = set(deltas.keys())
        while work:
            resource = work.pop()

            # Do we need to refresh the usage?
            if resource not in usages:
                usages[resource] = _quota_usage_create(elevated,
                                                       project_id,
                                                       resource,
                                                       0, 0,
                                                       until_refresh or None,
                                                       session=context.session)
                refresh = True
            elif usages[resource].in_use < 0:
                # Negative in_use count indicates a desync, so try to
                # heal from that...
                refresh = True
            elif usages[resource].until_refresh is not None:
                usages[resource].until_refresh -= 1
                if usages[resource].until_refresh <= 0:
                    refresh = True
            elif max_age and usages[resource].updated_at is not None and (
                (usages[resource].updated_at -
                    timeutils.utcnow()).seconds >= max_age):
                refresh = True

            if refresh:
                # no actual usage refresh here

                # refresh from the bottom pod
                usages[resource].until_refresh = until_refresh or None

                # Because more than one resource may be refreshed
                # by the call to the sync routine, and we don't
                # want to double-sync, we make sure all refreshed
                # resources are dropped from the work set.
                work.discard(resource)

                # NOTE(Vek): We make the assumption that the sync
                #            routine actually refreshes the
                #            resources that it is the sync routine
                #            for.  We don't check, because this is
                #            a best-effort mechanism.

        # Check for deltas that would go negative
        unders = [r for r, delta in deltas.items()
                  if delta < 0 and delta + usages[r].in_use < 0]

        # Now, let's check the quotas
        # NOTE(Vek): We're only concerned about positive increments.
        #            If a project has gone over quota, we want them to
        #            be able to reduce their usage without any
        #            problems.
        overs = [r for r, delta in deltas.items()
                 if quotas[r] >= 0 and delta >= 0 and
                 quotas[r] < delta + usages[r].in_use + usages[r].reserved]

        # NOTE(Vek): The quota check needs to be in the transaction,
        #            but the transaction doesn't fail just because
        #            we're over quota, so the OverQuota raise is
        #            outside the transaction.  If we did the raise
        #            here, our usage updates would be discarded, but
        #            they're not invalidated by being over-quota.

        # Create the reservations
        if not overs:
            reservations = []
            for resource, delta in deltas.items():
                reservation = _reservation_create(elevated,
                                                  str(uuid.uuid4()),
                                                  usages[resource],
                                                  project_id,
                                                  resource, delta, expire,
                                                  session=context.session)
                reservations.append(reservation.uuid)

                # Also update the reserved quantity
                # NOTE(Vek): Again, we are only concerned here about
                #            positive increments.  Here, though, we're
                #            worried about the following scenario:
                #
                #            1) User initiates resize down.
                #            2) User allocates a new instance.
                #            3) Resize down fails or is reverted.
                #            4) User is now over quota.
                #
                #            To prevent this, we only update the
                #            reserved value if the delta is positive.
                if delta > 0:
                    usages[resource].reserved += delta

    if unders:
        LOG.warning(_LW("Change will make usage less than 0 for the following "
                        "resources: %s"), unders)
    if overs:
        usages = {k: dict(in_use=v['in_use'], reserved=v['reserved'])
                  for k, v in usages.items()}
        raise exceptions.OverQuota(overs=sorted(overs), quotas=quotas,
                                   usages=usages)

    return reservations
Example #18
    def _setup_router_one_pod(self, ctx, t_pod, b_pod, t_client, t_net,
                              t_router, t_ew_bridge_net, t_ew_bridge_subnet,
                              need_ns_bridge):
        b_client = self._get_client(b_pod['pod_name'])

        router_body = {'router': {'name': t_router['id'],
                                  'distributed': False}}
        project_id = t_router['tenant_id']

        # create bottom router in target bottom pod
        _, b_router_id = self.helper.prepare_bottom_element(
            ctx, project_id, b_pod, t_router, 'router', router_body)

        # handle E-W networking
        # create top E-W bridge port
        q_ctx = None  # no need to pass neutron context when using client
        t_ew_bridge_port_id = self.helper.get_bridge_interface(
            ctx, q_ctx, project_id, t_pod, t_ew_bridge_net['id'],
            b_router_id, None, True)

        # create bottom E-W bridge port
        t_ew_bridge_port = t_client.get_ports(ctx, t_ew_bridge_port_id)
        (is_new, b_ew_bridge_port_id,
         _, _) = self.helper.get_bottom_bridge_elements(
            ctx, project_id, b_pod, t_ew_bridge_net, False, t_ew_bridge_subnet,
            t_ew_bridge_port)

        # attach bottom E-W bridge port to bottom router
        if is_new:
            # only attach bridge port the first time
            b_client.action_routers(ctx, 'add_interface', b_router_id,
                                    {'port_id': b_ew_bridge_port_id})
        else:
            # still need to check if the bridge port is bound
            port = b_client.get_ports(ctx, b_ew_bridge_port_id)
            if not port.get('device_id'):
                b_client.action_routers(ctx, 'add_interface', b_router_id,
                                        {'port_id': b_ew_bridge_port_id})

        # handle N-S networking
        if need_ns_bridge:
            t_ns_bridge_net_name = constants.ns_bridge_net_name % project_id
            t_ns_bridge_subnet_name = constants.ns_bridge_subnet_name % (
                project_id)
            t_ns_bridge_net = self._get_resource_by_name(
                t_client, ctx, 'network', t_ns_bridge_net_name)
            t_ns_bridge_subnet = self._get_resource_by_name(
                t_client, ctx, 'subnet', t_ns_bridge_subnet_name)
            # create bottom N-S bridge network and subnet
            (_, _, b_ns_bridge_subnet_id,
             b_ns_bridge_net_id) = self.helper.get_bottom_bridge_elements(
                ctx, project_id, b_pod, t_ns_bridge_net, True,
                t_ns_bridge_subnet, None)
            # create top N-S bridge gateway port
            t_ns_bridge_gateway_id = self.helper.get_bridge_interface(
                ctx, q_ctx, project_id, t_pod, t_ns_bridge_net['id'],
                b_router_id, None, False)
            t_ns_bridge_gateway = t_client.get_ports(ctx,
                                                     t_ns_bridge_gateway_id)
            # add external gateway for bottom router
            # add gateway is update operation, can run multiple times
            gateway_ip = t_ns_bridge_gateway['fixed_ips'][0]['ip_address']
            b_client.action_routers(
                ctx, 'add_gateway', b_router_id,
                {'network_id': b_ns_bridge_net_id,
                 'external_fixed_ips': [{'subnet_id': b_ns_bridge_subnet_id,
                                         'ip_address': gateway_ip}]})

        # attach internal port to bottom router
        t_ports = self._get_router_interfaces(t_client, ctx, t_router['id'],
                                              t_net['id'])
        b_net_id = db_api.get_bottom_id_by_top_id_pod_name(
            ctx, t_net['id'], b_pod['pod_name'], constants.RT_NETWORK)
        if b_net_id:
            b_ports = self._get_router_interfaces(b_client, ctx, b_router_id,
                                                  b_net_id)
        else:
            b_ports = []
        if not t_ports and b_ports:
            # remove redundant bottom interface
            b_port = b_ports[0]
            request_body = {'port_id': b_port['id']}
            b_client.action_routers(ctx, 'remove_interface', b_router_id,
                                    request_body)
        elif t_ports and not b_ports:
            # create new bottom interface
            t_port = t_ports[0]

            # only consider ipv4 address currently
            t_subnet_id = t_port['fixed_ips'][0]['subnet_id']
            t_subnet = t_client.get_subnets(ctx, t_subnet_id)

            (b_net_id,
             subnet_map) = self.helper.prepare_bottom_network_subnets(
                ctx, q_ctx, project_id, b_pod, t_net, [t_subnet])

            # the gateway ip of bottom subnet is set to the ip of t_port, so
            # we just attach the bottom subnet to the bottom router and neutron
            # server in the bottom pod will create the interface for us, using
            # the gateway ip.
            b_client.action_routers(ctx, 'add_interface', b_router_id,
                                    {'subnet_id': subnet_map[t_subnet_id]})

        if not t_router['external_gateway_info']:
            return

        # handle floatingip
        t_ext_net_id = t_router['external_gateway_info']['network_id']
        t_fips = t_client.list_floatingips(ctx, [{'key': 'floating_network_id',
                                                  'comparator': 'eq',
                                                  'value': t_ext_net_id}])
        # skip unbound top floatingip
        t_ip_fip_map = dict([(fip['floating_ip_address'],
                              fip) for fip in t_fips if fip['port_id']])
        mappings = db_api.get_bottom_mappings_by_top_id(ctx, t_ext_net_id,
                                                        constants.RT_NETWORK)
        # bottom external network should exist
        b_ext_pod, b_ext_net_id = mappings[0]
        b_ext_client = self._get_client(b_ext_pod['pod_name'])
        b_fips = b_ext_client.list_floatingips(
            ctx, [{'key': 'floating_network_id', 'comparator': 'eq',
                   'value': b_ext_net_id}])
        # skip unbound bottom floatingip
        b_ip_fip_map = dict([(fip['floating_ip_address'],
                              fip) for fip in b_fips if fip['port_id']])
        add_fips = [ip for ip in t_ip_fip_map if ip not in b_ip_fip_map]
        del_fips = [ip for ip in b_ip_fip_map if ip not in t_ip_fip_map]

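        # create and associate bottom floating IPs for top floating IPs that
        # are bound on top but not yet present in the bottom pod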
        for add_fip in add_fips:
            fip = t_ip_fip_map[add_fip]
            t_int_port_id = fip['port_id']
            b_int_port_id = db_api.get_bottom_id_by_top_id_pod_name(
                ctx, t_int_port_id, b_pod['pod_name'], constants.RT_PORT)
            if not b_int_port_id:
                LOG.warning(_LW('Port %(port_id)s associated with floating ip '
                                '%(fip)s is not mapped to bottom pod'),
                            {'port_id': t_int_port_id, 'fip': add_fip})
                continue
            t_int_port = t_client.get_ports(ctx, t_int_port_id)
            if t_int_port['network_id'] != t_net['id']:
                # only handle floating ip association for the given top network
                continue
            if need_ns_bridge:
                # create top N-S bridge interface port
                t_ns_bridge_port_id = self.helper.get_bridge_interface(
                    ctx, q_ctx, project_id, t_pod, t_ns_bridge_net['id'], None,
                    b_int_port_id, False)
                t_ns_bridge_port = t_client.get_ports(ctx, t_ns_bridge_port_id)
                b_ext_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name(
                    ctx, t_ns_bridge_net['id'], b_ext_pod['pod_name'],
                    constants.RT_NETWORK)
                port_body = {
                    'port': {
                        'tenant_id': project_id,
                        'admin_state_up': True,
                        'name': 'ns_bridge_port',
                        'network_id': b_ext_bridge_net_id,
                        'fixed_ips': [{'ip_address': t_ns_bridge_port[
                            'fixed_ips'][0]['ip_address']}]
                    }
                }
                _, b_ns_bridge_port_id = self.helper.prepare_bottom_element(
                    ctx, project_id, b_ext_pod, t_ns_bridge_port,
                    constants.RT_PORT, port_body)
                self._safe_create_bottom_floatingip(
                    ctx, b_ext_pod, b_ext_client, b_ext_net_id, add_fip,
                    b_ns_bridge_port_id)
                self._safe_create_bottom_floatingip(
                    ctx, b_pod, b_client, b_ns_bridge_net_id,
                    t_ns_bridge_port['fixed_ips'][0]['ip_address'],
                    b_int_port_id)
            else:
                self._safe_create_bottom_floatingip(
                    ctx, b_pod, b_client, b_ext_net_id, add_fip,
                    b_int_port_id)

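        # disassociate bottom floating IPs whose top counterpart is gone or
        # unbound, cleaning up bridge ports and routing entries when the
        # north-south bridge is in use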
        for del_fip in del_fips:
            fip = b_ip_fip_map[del_fip]
            if need_ns_bridge:
                b_ns_bridge_port = b_ext_client.get_ports(ctx, fip['port_id'])
                entries = core.query_resource(
                    ctx, models.ResourceRouting,
                    [{'key': 'bottom_id', 'comparator': 'eq',
                      'value': b_ns_bridge_port['id']},
                     {'key': 'pod_id', 'comparator': 'eq',
                      'value': b_ext_pod['pod_id']}], [])
                t_ns_bridge_port_id = entries[0]['top_id']
                b_int_fips = b_client.list_floatingips(
                    ctx,
                    [{'key': 'floating_ip_address',
                      'comparator': 'eq',
                      'value': b_ns_bridge_port['fixed_ips'][0]['ip_address']},
                     {'key': 'floating_network_id',
                      'comparator': 'eq',
                      'value': b_ns_bridge_net_id}])
                if b_int_fips:
                    b_client.delete_floatingips(ctx, b_int_fips[0]['id'])
                b_ext_client.update_floatingips(
                    ctx, fip['id'], {'floatingip': {'port_id': None}})

                # for the bridge port we have two resource routing entries:
                # one for the bridge port in the top pod and another for the
                # bridge port in the bottom pod. calling t_client.delete_ports
                # will delete the bridge port in the bottom pod as well as its
                # routing entry, but we also need to remove the routing entry
                # for the bridge port in the top pod; the bridge network
                # itself will be deleted when the router is deleted

                # first we update the routing entry to set bottom_id to None
                # and expire the entry, so if we then succeed in deleting the
                # bridge port, this expired entry will be deleted as well;
                # otherwise, if we fail to delete the bridge port and the port
                # is later accessed via the lock_handle module, that module
                # will find the port and update the entry
                with ctx.session.begin():
                    core.update_resources(
                        ctx, models.ResourceRouting,
                        [{'key': 'bottom_id', 'comparator': 'eq',
                          'value': t_ns_bridge_port_id}],
                        {'bottom_id': None,
                         'created_at': constants.expire_time,
                         'updated_at': constants.expire_time})
                # delete bridge port
                t_client.delete_ports(ctx, t_ns_bridge_port_id)
                # delete the expired entry; even if this deletion fails, the
                # lock_handle module still has a chance to delete it later
                with ctx.session.begin():
                    core.delete_resources(ctx, models.ResourceRouting,
                                          [{'key': 'bottom_id',
                                            'comparator': 'eq',
                                            'value': t_ns_bridge_port_id}])
            else:
                b_client.update_floatingips(ctx, fip['id'],
                                            {'floatingip': {'port_id': None}})
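
# The reconciliation above boils down to a set difference between the bound
# floating IPs known on the top pod and those known on the bottom pod. The
# sketch below isolates that step with hypothetical sample data; it is an
# illustration, not part of the plugin code, and assumes each floating IP
# dict carries the 'floating_ip_address' and 'port_id' keys as returned by
# Neutron.


def diff_floatingips(t_fips, b_fips):
    """Return (ips to create on bottom, ips to disassociate on bottom)."""
    # only bound floating IPs (those with a port_id) take part in the diff
    t_ip_fip_map = {fip['floating_ip_address']: fip
                    for fip in t_fips if fip['port_id']}
    b_ip_fip_map = {fip['floating_ip_address']: fip
                    for fip in b_fips if fip['port_id']}
    add_fips = [ip for ip in t_ip_fip_map if ip not in b_ip_fip_map]
    del_fips = [ip for ip in b_ip_fip_map if ip not in t_ip_fip_map]
    return add_fips, del_fips


if __name__ == '__main__':
    top = [{'floating_ip_address': '203.0.113.10', 'port_id': 'port-1'},
           {'floating_ip_address': '203.0.113.11', 'port_id': None}]
    bottom = [{'floating_ip_address': '203.0.113.12', 'port_id': 'port-2'}]
    # prints (['203.0.113.10'], ['203.0.113.12'])
    print(diff_floatingips(top, bottom))
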
    def _build_and_run_instance(self, context, host, instance, image,
                                request_spec, injected_files, admin_password,
                                requested_networks, security_groups,
                                block_device_mapping, node, limits,
                                filter_properties):

        image_name = image.get('name')
        self._notify_about_instance_usage(context, instance, 'create.start',
                                          extra_usage_info={
                                              'image_name': image_name})
        try:
            self._validate_instance_group_policy(context, instance,
                                                 filter_properties)
            with self._build_resources(context, instance, requested_networks,
                                       security_groups, image,
                                       block_device_mapping) as resources:
                instance.vm_state = vm_states.BUILDING
                instance.task_state = task_states.SPAWNING
                instance.save(
                    expected_task_state=task_states.BLOCK_DEVICE_MAPPING)
                cascaded_ports = resources['cascaded_ports']
                request_spec['block_device_mapping'] = block_device_mapping
                request_spec['security_group'] = security_groups
                self._proxy_run_instance(
                    context, instance, request_spec, filter_properties,
                    requested_networks, injected_files, admin_password,
                    None, host, node, None, cascaded_ports)

        except (exception.InstanceNotFound,
                exception.UnexpectedDeletingTaskStateError) as e:
            with excutils.save_and_reraise_exception():
                self._notify_about_instance_usage(context, instance,
                                                  'create.end', fault=e)
        except exception.ComputeResourcesUnavailable as e:
            LOG.debug(e.format_message(), instance=instance)
            self._notify_about_instance_usage(context, instance,
                                              'create.error', fault=e)
            raise exception.RescheduledException(
                instance_uuid=instance.uuid, reason=e.format_message())
        except exception.BuildAbortException as e:
            with excutils.save_and_reraise_exception():
                LOG.debug(e.format_message(), instance=instance)
                self._notify_about_instance_usage(context, instance,
                                                  'create.error', fault=e)
        except (exception.FixedIpLimitExceeded,
                exception.NoMoreNetworks) as e:
            LOG.warning(_LW('No more network or fixed IP to be allocated'),
                        instance=instance)
            self._notify_about_instance_usage(context, instance,
                                              'create.error', fault=e)
            msg = _('Failed to allocate the network(s) with error %s, '
                    'not rescheduling.') % e.format_message()
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                                                reason=msg)
        except (exception.VirtualInterfaceCreateException,
                exception.VirtualInterfaceMacAddressException) as e:
            LOG.exception(_LE('Failed to allocate network(s)'),
                          instance=instance)
            self._notify_about_instance_usage(context, instance,
                                              'create.error', fault=e)
            msg = _('Failed to allocate the network(s), not rescheduling.')
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                                                reason=msg)
        except (exception.FlavorDiskTooSmall,
                exception.FlavorMemoryTooSmall,
                exception.ImageNotActive,
                exception.ImageUnacceptable) as e:
            self._notify_about_instance_usage(context, instance,
                                              'create.error', fault=e)
            raise exception.BuildAbortException(instance_uuid=instance.uuid,
                                                reason=e.format_message())
        except Exception as e:
            self._notify_about_instance_usage(context, instance,
                                              'create.error', fault=e)
            raise exception.RescheduledException(
                instance_uuid=instance.uuid, reason=six.text_type(e))
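
# Several handlers above rely on oslo.utils' save_and_reraise_exception() to
# emit a 'create.error' notification while still propagating the original
# exception. A minimal sketch of that pattern follows; _notify and
# failing_step are hypothetical stand-ins, only the oslo_utils call is real
# and requires oslo.utils to be installed.
from oslo_utils import excutils


def _notify(event, fault):
    # placeholder for _notify_about_instance_usage-style reporting
    print('notify %s: %s' % (event, fault))


def run_with_notification(step):
    try:
        step()
    except Exception as e:
        with excutils.save_and_reraise_exception():
            # runs before the original exception is re-raised; if this block
            # itself raises, the original exception is still logged
            _notify('create.error', e)


def failing_step():
    raise RuntimeError('build failed')

# run_with_notification(failing_step) would print the notification and then
# re-raise the RuntimeError to the caller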