Example #1
 def _update_network_host(self, context, net_uuid):
     """Set the host column in the networks table: note that this won't
        work with multi-host but QuantumManager doesn't support that
        anyways.  The floating IPs mixin required network['host'] to be
        set."""
     entry = db.network_get_by_uuid(context.elevated(), net_uuid)
     entry['host'] = self.host
     db.network_update(context.elevated(), entry['id'], entry)
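Every example on this page calls context.elevated(). For orientation, here is a minimal sketch (not Nova's actual RequestContext) of the behaviour the examples rely on: an admin-capable copy of the request context, optionally widened to read deleted rows, with the original context left untouched.

import copy


class RequestContext(object):
    """Minimal stand-in for the request context assumed by these examples."""

    def __init__(self, user_id, project_id, is_admin=False, roles=None,
                 read_deleted='no'):
        self.user_id = user_id
        self.project_id = project_id
        self.is_admin = is_admin
        self.roles = list(roles or [])
        self.read_deleted = read_deleted

    def elevated(self, read_deleted=None):
        """Return a copy of this context with admin rights."""
        ctx = copy.copy(self)
        ctx.is_admin = True
        if 'admin' not in ctx.roles:
            # build a new list so the original context's roles are untouched
            ctx.roles = ctx.roles + ['admin']
        if read_deleted is not None:
            ctx.read_deleted = read_deleted
        return ctx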
Example #2
    def disassociate_floating_ip(self, context, address,
                                 affect_auto_assigned=False):
        """Disassociates a floating ip from its fixed ip.

        Makes sure everything makes sense then calls _disassociate_floating_ip,
        rpc'ing to correct host if i'm not it.
        """
        floating_ip = floating_ip_obj.FloatingIP.get_by_address(context,
                                                                address)

        # handle auto assigned
        if not affect_auto_assigned and floating_ip.auto_assigned:
            raise exception.CannotDisassociateAutoAssignedFloatingIP()

        # make sure project owns this floating ip (allocated)
        self._floating_ip_owned_by_project(context, floating_ip)

        # make sure floating ip is associated
        if not floating_ip.fixed_ip_id:
            floating_address = floating_ip.address
            raise exception.FloatingIpNotAssociated(address=floating_address)

        fixed_ip = fixed_ip_obj.FixedIP.get_by_id(context,
                                                  floating_ip.fixed_ip_id)

        # send to the correct host, unless I'm the correct host
        network = network_obj.Network.get_by_id(context.elevated(),
                                                fixed_ip.network_id)
        interface = floating_ip.interface
        if network.multi_host:
            instance = instance_obj.Instance.get_by_uuid(
                context, fixed_ip.instance_uuid)
            service = service_obj.Service.get_by_host_and_topic(
                context.elevated(), instance.host, CONF.network_topic)
            if service and self.servicegroup_api.service_is_up(service):
                host = instance.host
            else:
                # NOTE(vish): if the service is down just deallocate the data
                #             locally. Set the host to local so the call will
                #             not go over rpc and set interface to None so the
                #             teardown in the driver does not happen.
                host = self.host
                interface = None
        else:
            host = network.host

        if host == self.host:
            # I'm the correct host
            self._disassociate_floating_ip(context, address, interface,
                                           fixed_ip.instance_uuid)
        else:
            # send to correct host
            self.network_rpcapi._disassociate_floating_ip(context, address,
                    interface, host, fixed_ip.instance_uuid)
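The tail of this example shows a pattern that recurs throughout this page: run the operation locally if this manager owns the host, otherwise forward it over RPC. A condensed, hypothetical form (the helper name is invented):

def _run_or_forward_disassociate(self, context, address, interface, host,
                                 instance_uuid):
    # local call when we are the owning host, RPC hand-off otherwise
    if host == self.host:
        self._disassociate_floating_ip(context, address, interface,
                                       instance_uuid)
    else:
        self.network_rpcapi._disassociate_floating_ip(
            context, address, interface, host, instance_uuid)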
Example #3
    def test_get_network_not_in_db(self):
        context = self.mox.CreateMockAnything()
        context.elevated().AndReturn('elevated')
        self.mox.StubOutWithMock(db, 'network_get_by_uuid')
        self.net_man.context = context
        db.network_get_by_uuid('elevated', 'quantum_net_id').AndReturn(None)

        self.mox.ReplayAll()

        network = self.net_man.get_network(context, ('quantum_net_id',
                                                     'net_tenant_id'))
        self.assertEqual(network['quantum_net_id'], 'quantum_net_id')
        self.assertEqual(network['uuid'], 'quantum_net_id')
Example #4
 def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
     """Gets a fixed ip from the pool."""
     # TODO(vish): when this is called by compute, we can associate compute
     #             with a network, or a cluster of computes with a network
     #             and use that network here with a method like
     #             network_get_by_compute_host
     network_ref = self.db.network_get_by_bridge(context.elevated(),
                                                 FLAGS.flat_network_bridge)
     address = self.db.fixed_ip_associate_pool(context.elevated(),
                                               network_ref['id'],
                                               instance_id)
     self.db.fixed_ip_update(context, address, {'allocated': True})
     return address
Example #5
    def deallocate_for_instance(self, context, **kwargs):
        """Called when a VM is terminated.  Loop through each virtual
           interface in the Nova DB and remove the Quantum port and
           clear the IP allocation using the IPAM.  Finally, remove
           the virtual interfaces from the Nova DB.
        """
        instance_id = kwargs.get('instance_id')
        project_id = kwargs.pop('project_id', None)

        admin_context = context.elevated()
        vifs = db.virtual_interface_get_by_instance(admin_context,
                                                    instance_id)

        for vif in vifs:
            network = db.network_get(admin_context, vif['network_id'])

            self.deallocate_port(vif['uuid'], network['uuid'], project_id,
                                 instance_id)

            ipam_tenant_id = self.deallocate_ip_address(context,
                                network['uuid'], project_id, vif, instance_id)

            if FLAGS.quantum_use_dhcp:
                self.update_dhcp(context, ipam_tenant_id, network,
                                 vif, project_id)

            db.virtual_interface_delete(admin_context, vif['id'])
Example #6
    def create_snapshot(self, context, volume_id, snapshot_id):
        """Creates and exports the snapshot."""
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        LOG.info(_("snapshot %s: creating"), snapshot_ref['name'])

        try:
            snap_name = snapshot_ref['name']
            LOG.debug(_("snapshot %(snap_name)s: creating") % locals())
            model_update = self.driver.create_snapshot(snapshot_ref)
            if model_update:
                self.db.snapshot_update(context, snapshot_ref['id'],
                                        model_update)

        except Exception:
            with utils.save_and_reraise_exception():
                self.db.snapshot_update(context,
                                        snapshot_ref['id'],
                                        {'status': 'error'})

        self.db.snapshot_update(context,
                                snapshot_ref['id'], {'status': 'available',
                                                     'progress': '100%'})
        LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name'])
        return snapshot_id
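The except clause above relies on utils.save_and_reraise_exception() to mark the snapshot as errored without swallowing the original exception. A stripped-down sketch of that helper's contract (the real oslo implementation also logs and preserves the traceback):

import contextlib
import sys


@contextlib.contextmanager
def save_and_reraise_exception():
    # Meant to be entered from inside an except block: remember the
    # exception currently being handled, let the caller run its cleanup
    # code, then re-raise the original exception.
    exc_value = sys.exc_info()[1]
    try:
        yield
    except Exception:
        # the cleanup itself failed; let that error propagate instead
        raise
    raise exc_value

It is used exactly as in the example: inside an except block, with the status-update cleanup in the with-body.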
Example #7
    def _teardown_network_on_host(self, context, network):
        if not CONF.fake_network:
            network['dhcp_server'] = self._get_dhcp_ip(context, network)
            dev = self.driver.get_dev(network)

            # NOTE(): For multi-hosted networks, if the network is no
            # longer used on this host and VPN forwarding rules aren't
            # handled by this host, we delete the network gateway.
            vpn_address = network['vpn_public_address']
            if (CONF.teardown_unused_network_gateway and
                not objects.Network.in_use_on_host(context, network['id'],
                                                   self.host)):
                LOG.debug("Remove unused gateway %s", network['bridge'])
                if network.enable_dhcp:
                    self.driver.kill_dhcp(dev)
                self.l3driver.remove_gateway(network)
                if not self._uses_shared_ip(network):
                    fip = objects.FixedIP.get_by_address(context,
                                                         network.dhcp_server)
                    fip.allocated = False
                    fip.host = None
                    fip.save()
            # NOTE(): if dhcp server is not set then don't dhcp
            elif network.enable_dhcp:
                # NOTE(): dhcp DB queries require elevated context
                elevated = context.elevated()
                self.driver.update_dhcp(elevated, dev, network)
Example #8
    def deallocate_for_instance(self, context, **kwargs):
        instance_id = kwargs.get('instance_id')
        project_id = kwargs.pop('project_id', None)
        admin_context = context.elevated()
        networks = self._get_networks_for_instance(
            admin_context, instance_id, project_id)
        vifs = self.db.virtual_interface_get_by_instance(context, instance_id)
        for n in networks:
            vif_id = "nova-" + str(instance_id) + "-" + str(n['id'])
            # Un-attach the vif and delete the port
            tenant_id = project_id or FLAGS.quantum_default_tenant_id
            quantum_net_id = n['bridge']
            LOG.debug("Using quantum_net_id: %s" % quantum_net_id)
            attachment = vif_id
            port_id = quantum.get_port_by_attachment(
                tenant_id, quantum_net_id, attachment)

            # FIXME: tell Quantum that this interface-binding is no
            # longer valid.

            if not port_id:
                LOG.error("Unable to find port with attachment: %s" %
                          attachment)
            else:
                quantum.unplug_iface(tenant_id, quantum_net_id, port_id)
                quantum.delete_port(tenant_id, quantum_net_id, port_id)

            vif = filter(lambda vif: vif['network_id'] == n['id'], vifs)[0]
            melange.deallocate_ips(n['id'], vif['id'],
                                   project_id=n['project_id'])

        self.db.virtual_interface_delete_by_instance(context, instance_id)
Example #9
    def get_vnc_console(self, context, console_type, instance):
        """Return connection information for a vnc console."""
        context = context.elevated()
        LOG.debug(_("Getting vnc console"), instance=instance)
        token = str(utils.gen_uuid())

        if console_type == 'ajaxterm':
            access_url = '%s?token=%s' % (FLAGS.ajaxterm_base_url, token)
            connect_info = self.driver.get_web_console(instance)
            connect_info['token'] = token
            connect_info['access_url'] = access_url
            return connect_info
        elif console_type == 'novnc':
            # For Essex, novncproxy_base_url must include the full path
            # including the html file (like http://myhost/vnc_auto.html)
            access_url = '%s?token=%s' % (FLAGS.novncproxy_base_url, token)
        elif console_type == 'xvpvnc':
            access_url = '%s?token=%s' % (FLAGS.xvpvncproxy_base_url, token)
        else:
            raise exception.ConsoleTypeInvalid(console_type=console_type)

        # Retrieve connect info from driver, and then decorate with our
        # access info token
        connect_info = self.driver.get_vnc_console(instance)
        connect_info['token'] = token
        connect_info['access_url'] = access_url

        return connect_info
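For illustration only, a hypothetical connect_info for the 'novnc' case after the token and access_url have been added (key names follow the driver call above; the values are invented, and exact driver keys vary by virt driver):

connect_info = {
    'host': '192.168.0.5',       # hypervisor-side VNC endpoint
    'port': 5900,
    'internal_access_path': None,
    'token': '8e24a2a7-...',     # the generated uuid token
    'access_url': 'http://myhost/vnc_auto.html?token=8e24a2a7-...',
}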
Example #10
    def associate_floating_ip(self, context, floating_address, fixed_address,
                              affect_auto_assigned=False):
        """Associates a floating ip with a fixed ip.

        Makes sure everything makes sense then calls _associate_floating_ip,
        rpc'ing to correct host if i'm not it.

        Access to the floating_address is verified but access to the
        fixed_address is not verified. This assumes that that the calling
        side has already verified that the fixed_address is legal by
        checking access to the instance.
        """
        floating_ip = self.db.floating_ip_get_by_address(context,
                                                         floating_address)
        # handle auto_assigned
        if not affect_auto_assigned and floating_ip.get('auto_assigned'):
            return

        # make sure project owns this floating ip (allocated)
        self._floating_ip_owned_by_project(context, floating_ip)

        # disassociate any already associated
        orig_instance_uuid = None
        if floating_ip['fixed_ip_id']:
            # find previously associated instance
            fixed_ip = fixed_ip_obj.FixedIP.get_by_id(
                context, floating_ip['fixed_ip_id'])
            if str(fixed_ip.address) == fixed_address:
                # NOTE(vish): already associated to this address
                return
            orig_instance_uuid = fixed_ip.instance_uuid

            self.disassociate_floating_ip(context, floating_address)

        fixed_ip = fixed_ip_obj.FixedIP.get_by_address(context,
                                                       fixed_address)

        # send to the correct host, unless I'm the correct host
        network = self.db.network_get(context.elevated(),
                                      fixed_ip.network_id)
        if network['multi_host']:
            instance = self.db.instance_get_by_uuid(context,
                                                    fixed_ip['instance_uuid'])
            host = instance['host']
        else:
            host = network['host']

        interface = floating_ip.get('interface')
        if host == self.host:
            # I'm the correct host
            self._associate_floating_ip(context, floating_address,
                                        fixed_address, interface,
                                        fixed_ip.instance_uuid)
        else:
            # send to correct host
            self.network_rpcapi._associate_floating_ip(context,
                    floating_address, fixed_address, interface, host,
                    fixed_ip.instance_uuid)

        return orig_instance_uuid
Example #11
    def allocate_for_instance(self, context, **kwargs):
        """Handles allocating the various network resources for an instance.

        rpc.called by network_api
        """
        instance_id = kwargs.pop('instance_id')
        host = kwargs.pop('host')
        project_id = kwargs.pop('project_id')
        type_id = kwargs.pop('instance_type_id')
        vpn = kwargs.pop('vpn')
        admin_context = context.elevated()
        LOG.debug(_("network allocations for instance %s"), instance_id,
                                                            context=context)
        networks = self._get_networks_for_instance(admin_context, instance_id,
                                                                  project_id)
        # Create a port via quantum and attach the vif
        tenant_id = project_id
        for n in networks:
            vif_id = "nova-" + str(instance_id) + "-" + str(n['id'])
            quantum_net_id = n['bridge']
            LOG.debug("Using quantum_net_id: %s" % quantum_net_id)
            port_id = quantum.create_port(tenant_id, quantum_net_id)
            quantum.plug_iface(tenant_id, quantum_net_id, port_id, vif_id)

            # TODO: also communicate "interface-binding" and "tenant-id"
            # to Quantum

        LOG.warn(networks)
        self._allocate_mac_addresses(context, instance_id, networks)
        ips = self._allocate_fixed_ips(admin_context, instance_id, host,
                                       networks, vpn=vpn)
        vifs = self.db.virtual_interface_get_by_instance(context, instance_id)
        return self._construct_instance_nw_info(
            context, instance_id, type_id, host, ips, vifs)
Example #12
    def _list_cobalt_hosts(self, context, availability_zone=None):
        """ Returns a list of all the hosts known to openstack running the cobalt service. """
        admin_context = context.elevated()
        services = self.db.service_get_all_by_topic(admin_context, CONF.cobalt_topic)

        if availability_zone is not None and ':' in availability_zone:
            parts = availability_zone.split(':')
            if len(parts) > 2:
                raise exception.NovaException(_('Invalid availability zone'))
            az = parts[0]
            host = parts[1]
            if (az, host) in [(srv['availability_zone'], srv['host']) for srv in services]:
                return [host]
            else:
                return []

        hosts = []
        for srv in services:
            host_az = availability_zones.get_host_availability_zone(
                context, srv['host'])
            in_availability_zone = (availability_zone is None or
                                    availability_zone == host_az)

            if srv['host'] not in hosts and in_availability_zone:
                hosts.append(srv['host'])
        return hosts
Example #13
    def _provision_volume(self, context, vol, vsa_id, availability_zone):

        if availability_zone is None:
            availability_zone = FLAGS.storage_availability_zone

        now = utils.utcnow()
        options = {
            'size': vol['size'],
            'user_id': context.user_id,
            'project_id': context.project_id,
            'snapshot_id': None,
            'availability_zone': availability_zone,
            'status': "creating",
            'attach_status': "detached",
            'display_name': vol['name'],
            'display_description': vol['description'],
            'volume_type_id': vol['volume_type_id'],
            'metadata': dict(to_vsa_id=vsa_id),
            }

        size = vol['size']
        host = vol['host']
        name = vol['name']
        LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
                    "host %(host)s"), locals())

        volume_ref = db.volume_create(context.elevated(), options)
        driver.cast_to_volume_host(context, vol['host'],
                'create_volume', volume_id=volume_ref['id'],
                snapshot_id=None)
Example #14
    def trigger_security_group_rule_destroy_refresh(self, context, rule_ids):
        LOG.debug('rule_ids=%r', rule_ids)
        ctxt = context.elevated()
        tenant_id = context.to_dict()['project_id']

        for rule_id in rule_ids:
            self.rule_manager.delete_for_sg(tenant_id, rule_id)
Example #15
    def resize_claim(self, context, instance_ref, instance_type, limits=None):
        """Indicate that resources are needed for a resize operation to this
        compute host.
        :param context: security context
        :param instance_ref: instance to reserve resources for
        :param instance_type: new instance_type being resized to
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs.
        :returns: A Claim ticket representing the reserved resources.  This
                  should be turned into finalize  a resource claim or free
                  resources after the compute operation is finished.
        """
        if self.disabled:
            # compute_driver doesn't support resource tracking, just
            # generate the migration record and continue the resize:
            migration_ref = self._create_migration(
                context, instance_ref, instance_type)
            return claims.NopClaim(migration=migration_ref)

        claim = claims.ResizeClaim(instance_ref, instance_type, self)

        if claim.test(self.compute_node, limits):

            migration_ref = self._create_migration(
                context, instance_ref, instance_type)
            claim.migration = migration_ref

            # Mark the resources in-use for the resize landing on this
            # compute host:
            self._update_usage_from_migration(
                context, instance_ref, self.compute_node, migration_ref)
            elevated = context.elevated()
            self._update(elevated, self.compute_node)

            return claim

        else:
            raise exception.ComputeResourcesUnavailable()
Example #16
    def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
        """Gets a fixed ip from the pool."""
        # TODO(vish): when this is called by compute, we can associate compute
        #             with a network, or a cluster of computes with a network
        #             and use that network here with a method like
        #             network_get_by_compute_host
        address = None
        if network['cidr']:
            address = kwargs.get('address', None)
            if address:
                address = self.db.fixed_ip_associate(context,
                                                     address, instance_id,
                                                     network['id'])
            else:
                address = self.db.fixed_ip_associate_pool(context.elevated(),
                                                          network['id'],
                                                          instance_id)
            self._do_trigger_security_group_members_refresh_for_instance(
                instance_id)
            get_vif = self.db.virtual_interface_get_by_instance_and_network
            vif = get_vif(context, instance_id, network['id'])
            values = {'allocated': True,
                      'virtual_interface_id': vif['id']}
            self.db.fixed_ip_update(context, address, values)

        self._setup_network(context, network)
        return address
Example #17
    def _setup_network_on_host(self, context, network):
        """Sets up network on this host."""
        if context.is_admin:
            if not objects.Network.in_use_on_host(context, network['id'],
                                                  None):
                return

        if not network.vpn_public_address:
            address = CONF.vpn_ip
            network.vpn_public_address = address
            network.save()
        else:
            address = network.vpn_public_address
        network.dhcp_server = self._get_dhcp_ip(context, network)

        self._initialize_network(network)

        # NOTE(): only ensure this forward if the address hasn't been set
        #         manually.
        if address == CONF.vpn_ip and hasattr(self.driver,
                                               "ensure_vpn_forward"):
            self.l3driver.add_vpn(CONF.vpn_ip,
                    network.vpn_public_port,
                    network.vpn_private_address)
        if not CONF.fake_network:
            dev = self.driver.get_dev(network)
            # NOTE(): dhcp DB queries require elevated context
            if network.enable_dhcp:
                elevated = context.elevated()
                self.driver.update_dhcp(elevated, dev, network)
            if CONF.use_ipv6:
                self.driver.update_ra(context, dev, network)
                gateway = utils.get_my_linklocal(dev)
                network.gateway_v6 = gateway
                network.save()
Example #18
 def trigger_instance_remove_security_group_refresh(self, context,
                                                    instance_ref):
     """Refresh and remove security groups given an instance reference."""
     admin_context = context.elevated()
     for group in instance_ref['security_groups']:
         self.conductor_api.security_groups_trigger_handler(context,
             'instance_remove_security_group', instance_ref, group['name'])
Example #19
    def _create(self, req, body):
        context = req.environ['nova.context']
        context = context.elevated()
        vals = body['network']
        name = vals['name']
        size = vals['size']
        project_id = str(req.environ['HTTP_X_TENANT_ID'])
        cidr = self.get_new_cidr(context, size)
        self.create_network(context=context, label=name, fixed_range_v4=cidr,
                            num_networks=1, network_size=size,
                            multi_host=None, vlan_start=None, vpn_start=None,
                            fixed_range_v6=None, gateway=None,
                            gateway_v6=None, bridge=None,
                            bridge_interface=None, dns1=None, dns2=None,
                            project_id=project_id, priority=None,
                            uuid=None, fixed_cidr=None)
        db_net = db.network_get_by_cidr(context, cidr)
        net = dict(db_net.iteritems())
        return {'network': {'id': net['uuid'],
                            'name': net['label'],
                            'cidr': net['cidr']}}
Example #20
    def _create_migration(self, context, instance, instance_type):
        """Create a migration record for the upcoming resize.  This should
        be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
        claim will not be lost if the audit process starts.
        """
        # TODO(russellb): no-db-compute: Send the old instance type
        # info that is needed via rpc so db access isn't required
        # here.
        old_instance_type_id = instance["instance_type_id"]
        old_instance_type = instance_types.get_instance_type(
            old_instance_type_id)

        return db.migration_create(
            context.elevated(),
            {
                "instance_uuid": instance["uuid"],
                "source_compute": instance["host"],
                "source_node": instance["node"],
                "dest_compute": self.host,
                "dest_node": self.nodename,
                "dest_host": self.driver.get_host_ip_addr(),
                "old_instance_type_id": old_instance_type["id"],
                "new_instance_type_id": instance_type["id"],
                "status": "pre-migrating",
            },
        )
Example #21
    def create_volume(self, context, volume_id):
        """Creates and exports the volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        LOG.info(_("volume %s: creating"), volume_ref['name'])

        self.db.volume_update(context,
                              volume_id,
                              {'host': self.host})
        # NOTE(vish): so we don't have to get volume from db again
        #             before passing it to the driver.
        volume_ref['host'] = self.host

        try:
            vol_name = volume_ref['name']
            vol_size = volume_ref['size']
            LOG.debug(_("volume %(vol_name)s: creating lv of"
                    " size %(vol_size)sG") % locals())
            self.driver.create_volume(volume_ref)

            LOG.debug(_("volume %s: creating export"), volume_ref['name'])
            self.driver.create_export(context, volume_ref)
        except Exception:
            self.db.volume_update(context,
                                  volume_ref['id'], {'status': 'error'})
            raise

        now = datetime.datetime.utcnow()
        self.db.volume_update(context,
                              volume_ref['id'], {'status': 'available',
                                                 'launched_at': now})
        LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
        return volume_id
Example #22
    def _provision_volume(self, context, vol, vsa_id, availability_zone):

        if availability_zone is None:
            availability_zone = FLAGS.storage_availability_zone

        now = utils.utcnow()
        options = {
            "size": vol["size"],
            "user_id": context.user_id,
            "project_id": context.project_id,
            "snapshot_id": None,
            "availability_zone": availability_zone,
            "status": "creating",
            "attach_status": "detached",
            "display_name": vol["name"],
            "display_description": vol["description"],
            "volume_type_id": vol["volume_type_id"],
            "metadata": dict(to_vsa_id=vsa_id),
        }

        size = vol["size"]
        host = vol["host"]
        name = vol["name"]
        LOG.debug(_("Provision volume %(name)s of size %(size)s GB on " "host %(host)s"), locals())

        volume_ref = db.volume_create(context.elevated(), options)
        driver.cast_to_volume_host(context, vol["host"], "create_volume", volume_id=volume_ref["id"], snapshot_id=None)
Example #23
    def free_resources(self, context):
        """A compute operation finished freeing up resources.  Update compute
        model to reflect updated resource usage.

        (The hypervisor may not immediately 'GC' all resources, so ask directly
        to see what's available to update the compute node model.)
        """
        self.update_available_resource(context.elevated())
Example #24
 def remove_compute_volume(self, context, volume_id):
     """Remove remote volume on compute host."""
     context = context.elevated()
     volume_ref = self.db.volume_get(context, volume_id)
     if volume_ref['host'] == self.host and FLAGS.use_local_volumes:
         return True
     else:
         self.driver.undiscover_volume(volume_ref)
Example #25
    def trigger_security_group_members_refresh(self, context, instance_ref):
        """Refresh security group members."""
        admin_context = context.elevated()
        group_ids = [group['id'] for group in instance_ref['security_groups']]

        self.conductor_api.security_groups_trigger_members_refresh(
            admin_context, group_ids)
        self.conductor_api.security_groups_trigger_handler(admin_context,
            'security_group_members', group_ids)
Example #26
    def _check_host_enforcement(self, context, availability_zone):
        if availability_zone and ":" in availability_zone and context.is_admin:
            zone, _x, host = availability_zone.partition(":")
            service = db.service_get_by_args(context.elevated(), host, "nova-volume")
            if service["disabled"] or not utils.service_is_up(service):
                raise exception.WillNotSchedule(host=host)

            return host
        else:
            return None
Example #27
    def instance_claim(self, context, instance_ref, limits=None):
        """Indicate that some resources are needed for an upcoming compute
        instance build operation.

        This should be called before the compute node is about to perform
        an instance build operation that will consume additional resources.

        :param context: security context
        :param instance_ref: instance to reserve resources for
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs.
        :returns: A Claim ticket representing the reserved resources.  It can
                  be used to revert the resource usage if an error occurs
                  during the instance build.
        """
        if self.disabled:
            # compute_driver doesn't support resource tracking, just
            # set the 'host' and node fields and continue the build:
            self._set_instance_host_and_node(context, instance_ref)
            return claims.NopClaim()

        # sanity checks:
        if instance_ref['host']:
            LOG.warning(_("Host field should not be set on the instance until "
                          "resources have been claimed."),
                          instance=instance_ref)

        if instance_ref['node']:
            LOG.warning(_("Node field should not be set on the instance "
                          "until resources have been claimed."),
                          instance=instance_ref)

        # get memory overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(instance_ref)
        LOG.debug(_("Memory overhead for %(flavor)d MB instance; %(overhead)d "
                    "MB"), {'flavor': instance_ref['memory_mb'],
                            'overhead': overhead['memory_mb']})

        claim = claims.Claim(instance_ref, self, overhead=overhead)

        if claim.test(self.compute_node, limits):

            self._set_instance_host_and_node(context, instance_ref)

            # Mark resources in-use and update stats
            self._update_usage_from_instance(self.compute_node, instance_ref)

            elevated = context.elevated()
            # persist changes to the compute node:
            self._update(elevated, self.compute_node)

            return claim

        else:
            raise exception.ComputeResourcesUnavailable()
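A minimal sketch of what claim.test() amounts to, reduced to memory only (the class shape and field names here are hypothetical; the real claim also checks disk and CPUs against their limits):

class Claim(object):
    """Hypothetical stripped-down claim covering memory only."""

    def __init__(self, instance_ref, tracker, overhead=None):
        overhead = overhead or {'memory_mb': 0}
        # requested memory includes the per-instance overhead estimated
        # by the virt driver
        self.memory_mb = instance_ref['memory_mb'] + overhead['memory_mb']

    def test(self, compute_node, limits=None):
        limits = limits or {}
        # absent an explicit oversubscription limit, fall back to the
        # node's physical memory
        memory_limit = limits.get('memory_mb', compute_node['memory_mb'])
        free_memory = memory_limit - compute_node['memory_mb_used']
        return self.memory_mb <= free_memory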
Example #28
    def setup_compute_volume(self, context, volume_id):
        """Setup remote volume on compute host.

        Returns path to device."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        if volume_ref['host'] == self.host and FLAGS.use_local_volumes:
            path = self.driver.local_path(volume_ref)
        else:
            path = self.driver.discover_volume(context, volume_ref)
        return path
Example #29
    def import_blessed_instance(self, context, data):
        """
        Imports the instance as a new blessed instance.
        """

        # NOTE(dscannell): we need to do all the bless quota handling here
        # because we are essentially creating a new blessed instance in the
        # system.

        fields = data['fields']
        fields['disable_terminate'] = True

        if not context.is_admin:
            fields['project_id'] = context.project_id
            fields['user_id'] = context.user_id

        flavor_name = data['flavor_name']

        try:
            inst_type = self.compute_api.db.instance_type_get_by_name(
                context, flavor_name)
        except exception.InstanceTypeNotFoundByName:
            raise exception.NovaException(_('Flavor could not be found: %s')
                                          % flavor_name)

        fields['instance_type_id'] = inst_type['id']

        secgroup_ids = []

        for secgroup_name in data['security_group_names']:
            try:
                secgroup = self.db.security_group_get_by_name(
                    context, context.project_id, secgroup_name)
            except exception.SecurityGroupNotFoundForProject:
                raise exception.NovaException(_('Security group could not '
                                                'be found: %s')
                                              % secgroup_name)
            secgroup_ids.append(secgroup['id'])

        instance = self.db.instance_create(context, fields)
        LOG.debug(_("Imported new instance %s") % instance)
        self._instance_metadata_update(context, instance['uuid'],
                                       data['metadata'])
        self.db.instance_update(context, instance['uuid'],
                                {'vm_state': vm_states.BUILDING,
                                 'system_metadata': data['system_metadata']})

        # Apply the security groups
        for secgroup_id in secgroup_ids:
            self.db.instance_add_security_group(
                context.elevated(), instance['uuid'], secgroup_id)

        self._cast_cobalt_message(
            'import_instance', context, instance['uuid'],
            params={'image_id': data['export_image_id']})

        return self.get(context, instance['uuid'])
Example #30
    def create_volume(self, context, volume_id, snapshot_id=None, image_id=None, reservations=None):
        """Creates and exports the volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        self._notify_about_volume_usage(context, volume_ref, "create.start")
        LOG.info(_("volume %s: creating"), volume_ref["name"])

        self.db.volume_update(context, volume_id, {"host": self.host})
        # NOTE(vish): so we don't have to get volume from db again
        #             before passing it to the driver.
        volume_ref["host"] = self.host

        status = "available"
        model_update = False

        try:
            vol_name = volume_ref["name"]
            vol_size = volume_ref["size"]
            LOG.debug(_("volume %(vol_name)s: creating lv of" " size %(vol_size)sG") % locals())
            if snapshot_id is None and image_id is None:
                model_update = self.driver.create_volume(volume_ref)
            elif snapshot_id is not None:
                snapshot_ref = self.db.snapshot_get(context, snapshot_id)
                model_update = self.driver.create_volume_from_snapshot(volume_ref, snapshot_ref)
            else:
                # create the volume from an image
                image_service, image_id = glance.get_remote_image_service(context, image_id)
                image_location = image_service.get_location(context, image_id)
                cloned = self.driver.clone_image(volume_ref, image_location)
                if not cloned:
                    model_update = self.driver.create_volume(volume_ref)
                    status = "downloading"

            if model_update:
                self.db.volume_update(context, volume_ref["id"], model_update)

            LOG.debug(_("volume %s: creating export"), volume_ref["name"])
            model_update = self.driver.create_export(context, volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref["id"], model_update)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_ref["id"], {"status": "error"})

        now = timeutils.utcnow()
        volume_ref = self.db.volume_update(context, volume_ref["id"], {"status": status, "launched_at": now})
        LOG.debug(_("volume %s: created successfully"), volume_ref["name"])
        self._reset_stats()
        self._notify_about_volume_usage(context, volume_ref, "create.end")

        if image_id and not cloned:
            # copy the image onto the volume.
            self._copy_image_to_volume(context, volume_ref, image_id)
        return volume_id
Example #31
    def deallocate_for_instance(self, context, **kwargs):
        """Handles deallocating floating IP resources for an instance.

        Calls the superclass deallocate_for_instance() as well.

        rpc.called by network_api
        """
        if 'instance' in kwargs:
            instance_uuid = kwargs['instance'].uuid
        else:
            instance_uuid = kwargs['instance_id']
            if not uuidutils.is_uuid_like(instance_uuid):
                # NOTE(francois.charlier): in some cases the instance might be
                # deleted before the IPs are released, so we need to get
                # deleted instances too
                instance = objects.Instance.get_by_id(
                    context.elevated(read_deleted='yes'), instance_uuid)
                instance_uuid = instance.uuid

        try:
            fixed_ips = objects.FixedIPList.get_by_instance_uuid(
                context, instance_uuid)
        except exception.FixedIpNotFoundForInstance:
            fixed_ips = []
        # add to kwargs so we can pass to super to save a db lookup there
        kwargs['fixed_ips'] = fixed_ips
        for fixed_ip in fixed_ips:
            fixed_id = fixed_ip.id
            floating_ips = objects.FloatingIPList.get_by_fixed_ip_id(
                context, fixed_id)
            # disassociate floating ips related to fixed_ip
            for floating_ip in floating_ips:
                address = str(floating_ip.address)
                try:
                    self.disassociate_floating_ip(context,
                                                  address,
                                                  affect_auto_assigned=True)
                except exception.FloatingIpNotAssociated:
                    LOG.info(_LI("Floating IP %s is not associated. Ignore."),
                             address)
                # deallocate if auto_assigned
                if floating_ip.auto_assigned:
                    self.deallocate_floating_ip(context,
                                                address,
                                                affect_auto_assigned=True)

        # call the next inherited class's deallocate_for_instance()
        # which is currently the NetworkManager version
        # call this after so floating IPs are handled first
        super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
Example #32
    def update_usage(self, context, instance):
        """Update the resource usage and stats after a change in an
        instance
        """
        if self.disabled:
            return

        uuid = instance['uuid']

        # don't update usage for this instance unless it submitted a resource
        # claim first:
        if uuid in self.tracked_instances:
            self._update_usage_from_instance(self.compute_node, instance)
            self._update(context.elevated(), self.compute_node)
Example #33
    def deallocate_for_instance(self, context, **kwargs):
        """Called when a VM is terminated.  Loop through each virtual
           interface in the Nova DB and remove the Quantum port and
           clear the IP allocation using the IPAM.  Finally, remove
           the virtual interfaces from the Nova DB.
        """
        instance_id = kwargs.get('instance_id')
        project_id = kwargs.pop('project_id', None)

        admin_context = context.elevated()
        vifs = db.virtual_interface_get_by_instance(admin_context,
                                                    instance_id)
        for vif_ref in vifs:
            interface_id = vif_ref['uuid']
            q_tenant_id = project_id

            network_ref = db.network_get(admin_context, vif_ref['network_id'])
            net_id = network_ref['uuid']

            port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
                                                         net_id, interface_id)
            if not port_id:
                q_tenant_id = FLAGS.quantum_default_tenant_id
                port_id = self.q_conn.get_port_by_attachment(
                    q_tenant_id, net_id, interface_id)

            if not port_id:
                LOG.error("Unable to find port with attachment: %s" %
                          (interface_id))
            else:
                self.q_conn.detach_and_delete_port(q_tenant_id,
                                                   net_id, port_id)

            ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
                net_id, vif_ref['uuid'], project_id)

            self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
                                            net_id, vif_ref)

            # If DHCP is enabled on this network then we need to update the
            # leases and restart the server.
            if FLAGS.quantum_use_dhcp:
                self.update_dhcp(context, ipam_tenant_id, network_ref, vif_ref,
                    project_id)
        try:
            db.virtual_interface_delete_by_instance(admin_context,
                                                    instance_id)
        except exception.InstanceNotFound:
            LOG.error(_("Attempted to deallocate non-existent instance: %s" %
                        (instance_id)))
Example #34
    def update_load_stats_for_instance(self, context, instance_ref):
        """Update workload stats for the local compute host."""

        if self.disabled:
            return

        values = {}
        self.stats.update_stats_for_instance(instance_ref)
        values['stats'] = self.stats

        values['current_workload'] = self.stats.calculate_workload()
        values['running_vms'] = self.stats.num_instances
        values['vcpus_used'] = self.stats.num_vcpus_used

        self.compute_node = self._update(context.elevated(), values)
Example #35
    def resize_claim(self, context, instance, instance_type, limits=None):
        """Indicate that resources are needed for a resize operation to this
        compute host.
        :param context: security context
        :param instance: instance object to reserve resources for
        :param instance_type: new instance_type being resized to
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs.
        :returns: A Claim ticket representing the reserved resources.  This
                  should be turned into finalize  a resource claim or free
                  resources after the compute operation is finished.
        """
        if self.disabled:
            # compute_driver doesn't support resource tracking, just
            # generate the migration record and continue the resize:
            migration = self._create_migration(context, instance,
                                               instance_type)
            return claims.NopClaim(migration=migration)

        # get memory overhead required to build this instance:
        overhead = self.driver.estimate_instance_overhead(instance_type)
        LOG.debug(
            "Memory overhead for %(flavor)d MB instance; %(overhead)d "
            "MB", {
                'flavor': instance_type['memory_mb'],
                'overhead': overhead['memory_mb']
            })

        instance_ref = obj_base.obj_to_primitive(instance)
        claim = claims.ResizeClaim(instance_ref,
                                   instance_type,
                                   self,
                                   self.compute_node,
                                   overhead=overhead,
                                   limits=limits)

        migration = self._create_migration(context, instance_ref,
                                           instance_type)
        claim.migration = migration

        # Mark the resources in-use for the resize landing on this
        # compute host:
        self._update_usage_from_migration(context, instance_ref,
                                          self.compute_node, migration)
        elevated = context.elevated()
        self._update(elevated, self.compute_node)

        return claim
Example #36
 def copy_volume_to_image(self, context, volume_id, image_id):
     """Uploads the specified volume to Glance."""
     payload = {'volume_id': volume_id, 'image_id': image_id}
     try:
         volume = self.db.volume_get(context, volume_id)
         self.driver.ensure_export(context.elevated(), volume)
         image_service, image_id = glance.get_remote_image_service(
             context, image_id)
         self.driver.copy_volume_to_image(context, volume, image_service,
                                          image_id)
         LOG.debug(
             _("Uploaded volume %(volume_id)s to "
               "image (%(image_id)s) successfully") % locals())
     except Exception, error:
         with excutils.save_and_reraise_exception():
             payload['message'] = unicode(error)
Example #37
    def delete_snapshot(self, context, snapshot_id):
        """Deletes and unexports snapshot."""
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)

        try:
            LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name'])
            self.driver.delete_snapshot(snapshot_ref)
        except Exception:
            with utils.save_and_reraise_exception():
                self.db.snapshot_update(context, snapshot_ref['id'],
                                        {'status': 'error_deleting'})

        self.db.snapshot_destroy(context, snapshot_id)
        LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
        return True
Example #38
    def delete_volume(self, context, volume_id):
        """Deletes and unexports volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        if volume_ref['attach_status'] == "attached":
            # Volume is still attached, need to detach first
            raise exception.VolumeAttached(volume_id=volume_id)
        if volume_ref['host'] != self.host:
            raise exception.InvalidVolume(
                reason=_("Volume is not local to this node"))

        self._notify_about_volume_usage(context, volume_ref, "delete.start")
        self._reset_stats()
        try:
            LOG.debug(_("volume %s: removing export"), volume_ref['name'])
            self.driver.remove_export(context, volume_ref)
            LOG.debug(_("volume %s: deleting"), volume_ref['name'])
            self.driver.delete_volume(volume_ref)
        except exception.VolumeIsBusy:
            LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
            self.driver.ensure_export(context, volume_ref)
            self.db.volume_update(context, volume_ref['id'],
                                  {'status': 'available'})
            return True
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_ref['id'],
                                      {'status': 'error_deleting'})

        # Get reservations
        try:
            reservations = QUOTAS.reserve(context,
                                          volumes=-1,
                                          gigabytes=-volume_ref['size'])
        except Exception:
            reservations = None
            LOG.exception(_("Failed to update usages deleting volume"))

        volume_ref = self.db.volume_destroy(context, volume_id)
        LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
        self._notify_about_volume_usage(context, volume_ref, "delete.end")

        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations)

        return True
Example #39
    def detach_volume(self, context, volume_id):
        """Updates db to show volume is detached"""
        # TODO(vish): refactor this into a more general "unreserve"
        try:
            self.driver.detach_volume(context, volume_id)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.volume_update(context, volume_id,
                                      {'status': 'error_detaching'})

        self.db.volume_detached(context.elevated(), volume_id)

        # Check for https://bugs.launchpad.net/nova/+bug/1065702
        volume_ref = self.db.volume_get(context, volume_id)
        if (volume_ref['provider_location']
                and volume_ref['name'] not in volume_ref['provider_location']):
            self.driver.ensure_export(context, volume_ref)
Example #40
    def delete_network(self, context, fixed_range, uuid):
        """Lookup network by uuid, delete both the IPAM
           subnet and the corresponding Quantum network.

           The fixed_range parameter is kept here for interface compatibility
           but is not used.
        """
        net_ref = db.network_get_by_uuid(context.elevated(), uuid)
        project_id = net_ref['project_id']
        q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
        net_uuid = net_ref['uuid']

        # Check for any attached ports on the network and fail the deletion if
        # there is anything but the gateway port attached.  If it is only the
        # gateway port, unattach and delete it.
        ports = self.q_conn.get_attached_ports(q_tenant_id, net_uuid)
        num_ports = len(ports)
        gw_interface_id = self.driver.get_dev(net_ref)
        gw_port_uuid = None
        if gw_interface_id is not None:
            gw_port_uuid = self.q_conn.get_port_by_attachment(q_tenant_id,
                                        net_uuid, gw_interface_id)

        if gw_port_uuid:
            num_ports -= 1

        if num_ports > 0:
            raise Exception(_("Network %s has active ports, cannot delete") %
                            net_uuid)

        # only delete gw ports if we are going to finish deleting network
        if gw_port_uuid:
            self.q_conn.detach_and_delete_port(
                q_tenant_id, net_uuid, gw_port_uuid)

        # Now we can delete the network
        self.q_conn.delete_network(q_tenant_id, net_uuid)
        LOG.debug("Deleting network %s for tenant: %s" % \
                                    (net_uuid, q_tenant_id))
        self.ipam.delete_subnets_by_net_id(context, net_uuid, project_id)
        # Get rid of dnsmasq
        if FLAGS.quantum_use_dhcp:
            dev = self.driver.get_dev(net_ref)
            if self.driver._device_exists(dev):
                self.driver.kill_dhcp(dev)
Example #41
 def get_dhcp_leases(self, context, network_ref):
     """Return a network's hosts config in dnsmasq leasefile format."""
     subnet_id = network_ref['uuid']
     project_id = network_ref['project_id']
     ips = self.ipam.get_allocated_ips(context, subnet_id, project_id)
     leases_text = ""
     admin_context = context.elevated()
     for ip in ips:
         address, vif_id = ip
         vif = db.virtual_interface_get_by_uuid(admin_context, vif_id)
         mac_address = vif['address']
         text = "%s %s %s %s *\n" % \
             (int(time.time()) - FLAGS.dhcp_lease_time,
              mac_address, address, '*')
         leases_text += text
     LOG.debug("DHCP leases: %s" % leases_text)
     return leases_text
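For reference, a hypothetical rendering of the format string above; each generated line follows dnsmasq's leasefile layout of expiry time, MAC address, IP address, hostname, and client-id (the last two left as '*'):

>>> "%s %s %s %s *\n" % (1325376000, 'fa:16:3e:4c:2b:01', '10.0.0.3', '*')
'1325376000 fa:16:3e:4c:2b:01 10.0.0.3 * *\n'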
Example #42
 def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
     """Gets a fixed ip from the pool."""
     # TODO(vish): when this is called by compute, we can associate compute
     #             with a network, or a cluster of computes with a network
     #             and use that network here with a method like
     #             network_get_by_compute_host
     address = self.db.fixed_ip_associate_pool(context.elevated(),
                                               network['id'],
                                               instance_id)
      vif = self.db.virtual_interface_get_by_instance_and_network(
          context, instance_id, network['id'])
     values = {'allocated': True,
               'virtual_interface_id': vif['id']}
     self.db.fixed_ip_update(context, address, values)
     self._setup_network(context, network)
     return address
Example #43
 def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
     """Gets a fixed ip from the pool."""
     # TODO(vish): This should probably be getting project_id from
     #             the instance, but it is another trip to the db.
     #             Perhaps this method should take an instance_ref.
     ctxt = context.elevated()
     network_ref = self.db.project_get_network(ctxt, context.project_id)
     if kwargs.get('vpn', None):
         address = network_ref['vpn_private_address']
         self.db.fixed_ip_associate(ctxt, address, instance_id)
     else:
         address = self.db.fixed_ip_associate_pool(ctxt, network_ref['id'],
                                                   instance_id)
     self.db.fixed_ip_update(context, address, {'allocated': True})
     if not FLAGS.fake_network:
         self.driver.update_dhcp(context, network_ref['id'])
     return address
Example #44
    def trigger_security_group_create_refresh(self, context, group):
        """Create a chain and port group for the security group."""

        LOG.debug('group=%r', group)
        ctxt = context.elevated()
        sg_ref = db.security_group_get_by_name(ctxt, group['project_id'],
                                               group['name'])

        tenant_id = context.to_dict()['project_id']
        sg_id = sg_ref['id']
        sg_name = group['name']

        # create a chain for the security group
        self.chain_manager.create_for_sg(tenant_id, sg_id, sg_name)

        # create a port group for the security group
        self.pg_manager.create(tenant_id, sg_id, sg_name)
Example #45
    def create_volume(self, context, volume_id, snapshot_id=None):
        """Creates and exports the volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        LOG.info(_("volume %s: creating"), volume_ref['name'])

        self.db.volume_update(context,
                              volume_id,
                              {'host': self.host})
        # NOTE(vish): so we don't have to get volume from db again
        #             before passing it to the driver.
        volume_ref['host'] = self.host

        try:
            vol_name = volume_ref['name']
            vol_size = volume_ref['size']
            LOG.debug(_("volume %(vol_name)s: creating lv of"
                    " size %(vol_size)sG") % locals())
            if snapshot_id is None:
                model_update = self.driver.create_volume(volume_ref)
            else:
                snapshot_ref = self.db.snapshot_get(context, snapshot_id)
                model_update = self.driver.create_volume_from_snapshot(
                    volume_ref,
                    snapshot_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)

            LOG.debug(_("volume %s: creating export"), volume_ref['name'])
            model_update = self.driver.create_export(context, volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)
        except Exception:
            with utils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_ref['id'], {'status': 'error'})
                self._notify_vsa(context, volume_ref, 'error')

        now = utils.utcnow()
        self.db.volume_update(context,
                              volume_ref['id'], {'status': 'available',
                                                 'launched_at': now})
        LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
        self._notify_vsa(context, volume_ref, 'available')
        self._reset_stats()
        return volume_id
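The error path relies on utils.save_and_reraise_exception() so the handler can mark the volume as errored (and notify) without swallowing the original traceback. A minimal sketch of the idea behind that context manager, assuming nothing about Nova's actual implementation:

    import sys

    class save_and_reraise_exception(object):
        # Minimal sketch of the pattern only, not Nova's implementation:
        # capture the exception being handled on entry, run the cleanup
        # body, then re-raise the original so the caller still sees it.
        def __enter__(self):
            self.exc = sys.exc_info()[1]
            return self

        def __exit__(self, exc_type, exc_value, exc_tb):
            if exc_type is None and self.exc is not None:
                raise self.exc
            return False  # if the cleanup itself raised, let that win

    try:
        try:
            raise RuntimeError('create failed')
        except RuntimeError:
            with save_and_reraise_exception():
                print('volume marked as error')  # cleanup runs first
    except RuntimeError as exc:
        print('original error still propagated: %s' % exc)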
Example #46
    def deallocate_for_instance(self, context, **kwargs):
        """Handles deallocating floating IP resources for an instance.

        calls super class deallocate_for_instance() as well.

        rpc.called by network_api
        """
        instance_id = kwargs.get('instance_id')

        # NOTE(francois.charlier): in some cases the instance might be
        # deleted before the IPs are released, so we need to get deleted
        # instances too
        instance = self.db.instance_get(context.elevated(read_deleted='yes'),
                                        instance_id)

        try:
            fixed_ips = self.db.fixed_ip_get_by_instance(
                context, instance['uuid'])
        except exception.FixedIpNotFoundForInstance:
            fixed_ips = []
        # add to kwargs so we can pass to super to save a db lookup there
        kwargs['fixed_ips'] = fixed_ips
        for fixed_ip in fixed_ips:
            fixed_id = fixed_ip['id']
            floating_ips = self.db.floating_ip_get_by_fixed_ip_id(
                context, fixed_id)
            # disassociate floating ips related to fixed_ip
            for floating_ip in floating_ips:
                address = floating_ip['address']
                try:
                    self.disassociate_floating_ip(context,
                                                  address,
                                                  affect_auto_assigned=True)
                except exception.FloatingIpNotAssociated:
                    LOG.exception(_("Floating IP is not associated. Ignore."))
                # deallocate if auto_assigned
                if floating_ip['auto_assigned']:
                    self.deallocate_floating_ip(context,
                                                address,
                                                affect_auto_assigned=True)

        # call the next inherited class's deallocate_for_instance()
        # which is currently the NetworkManager version
        # call this after so floating IPs are handled first
        super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
Example #47
    def _get_dhcp_ip(self, context, network_ref, host=None):
        """Get the proper dhcp address to listen on."""
        # NOTE(vish): this is for compatibility
        if not network_ref['multi_host']:
            return network_ref['gateway']

        if not host:
            host = self.host
        network_id = network_ref['id']
        try:
            fip = self.db.fixed_ip_get_by_network_host(context, network_id,
                                                       host)
            return fip['address']
        except exception.FixedIpNotFoundForNetworkHost:
            elevated = context.elevated()
            return self.db.fixed_ip_associate_pool(elevated,
                                                   network_id,
                                                   host=host)
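So the lookup order is: single-host networks always answer with the gateway; multi-host networks reuse the fixed IP already bound to this host and fall back to pulling a fresh one from the pool. A small sketch of those two branches against a hypothetical StubDB (names are illustrative):

    class FixedIpNotFoundForNetworkHost(Exception):
        pass

    class StubDB(object):
        def __init__(self, known):
            self.known = known  # {(network_id, host): address}

        def fixed_ip_get_by_network_host(self, context, network_id, host):
            try:
                return {'address': self.known[(network_id, host)]}
            except KeyError:
                raise FixedIpNotFoundForNetworkHost()

        def fixed_ip_associate_pool(self, context, network_id, host=None):
            return '10.0.0.9'  # pretend a fresh address was reserved

    db = StubDB({(1, 'host-a'): '10.0.0.2'})
    # this host already has an address on the network: reuse it
    assert db.fixed_ip_get_by_network_host(None, 1, 'host-a')['address'] == \
        '10.0.0.2'
    # no address yet: fall back to the pool
    try:
        db.fixed_ip_get_by_network_host(None, 1, 'host-b')
    except FixedIpNotFoundForNetworkHost:
        assert db.fixed_ip_associate_pool(None, 1, host='host-b') == '10.0.0.9'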
Example #48
 def _create_migration(self, context, instance, instance_type):
     """Create a migration record for the upcoming resize.  This should
     be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
     claim will not be lost if the audit process starts.
     """
     old_instance_type = flavors.extract_flavor(instance)
     migration = migration_obj.Migration()
     migration.dest_compute = self.host
     migration.dest_node = self.nodename
     migration.dest_host = self.driver.get_host_ip_addr()
     migration.old_instance_type_id = old_instance_type['id']
     migration.new_instance_type_id = instance_type['id']
     migration.status = 'pre-migrating'
     migration.instance_uuid = instance['uuid']
     migration.source_compute = instance['host']
     migration.source_node = instance['node']
     migration.create(context.elevated())
     return migration
Example #49
    def get_instance_nw_info(self, context, instance_id, instance_uuid,
                             rxtx_factor, host, **kwargs):
        """This method is used by compute to fetch all network data
           that should be used when creating the VM.

           The method simply loops through all virtual interfaces
           stored in the nova DB and queries the IPAM lib to get
           the associated IP data.

           The format of returned data is 'defined' by the initial
           set of NetworkManagers found in nova/network/manager.py .
           Ideally this 'interface' will be more formally defined
           in the future.
        """
        project_id = kwargs['project_id']
        vifs = db.virtual_interface_get_by_instance(context, instance_id)

        net_tenant_dict = dict(
            (net_id, tenant_id) for (net_id, tenant_id) in
            self.ipam.get_project_and_global_net_ids(context, project_id))
        networks = {}
        for vif in vifs:
            if vif.get('network_id') is not None:
                network = db.network_get(context.elevated(), vif['network_id'])
                net_tenant_id = net_tenant_dict[network['uuid']]
                if net_tenant_id is None:
                    net_tenant_id = FLAGS.quantum_default_tenant_id
                network = {
                    'id': network['id'],
                    'uuid': network['uuid'],
                    'bridge': '',  # Quantum ignores this field
                    'label': network['label'],
                    'injected': FLAGS.flat_injected,
                    'project_id': net_tenant_id
                }
                networks[vif['uuid']] = network

        # update instance network cache and return network_info
        nw_info = self.build_network_info_model(context, vifs, networks,
                                                rxtx_factor, host)
        db.instance_info_cache_update(context, instance_uuid,
                                      {'network_info': nw_info.as_cache()})

        return nw_info
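The net_tenant_dict built above is just a mapping from network UUID to owning tenant, with None meaning a global network. The generator-expression form is equivalent to a dict comprehension (Python 2.7+), shown here on plain data:

    pairs = [('net-uuid-1', 'tenant-a'), ('net-uuid-2', None)]

    # generator-expression form used above
    net_tenant_dict = dict((net_id, tenant_id) for (net_id, tenant_id) in pairs)

    # equivalent dict comprehension (Python 2.7+)
    net_tenant_dict = {net_id: tenant_id for net_id, tenant_id in pairs}

    # None marks a global network; the code above substitutes
    # FLAGS.quantum_default_tenant_id in that case.
    assert net_tenant_dict['net-uuid-2'] is None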
Example #50
    def get_network_host(self, context):
        """Get the network for the current context."""
        network_ref = self.db.project_get_network(context.elevated(),
                                                  context.project_id)
        # NOTE(vish): If the network has no host, do a call to get an
        #             available host.  This should be changed to go through
        #             the scheduler at some point.
        host = network_ref['host']
        if not host:
            if FLAGS.fake_call:
                return self.set_network_host(context, network_ref['id'])
            host = rpc.call(
                context, FLAGS.network_topic, {
                    "method": "set_network_host",
                    "args": {
                        "network_id": network_ref['id']
                    }
                })

        return host
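The rpc.call envelope used here is just a dict with 'method' and 'args' keys, dispatched to whoever consumes the topic. A toy dispatcher showing how such an envelope maps onto a manager method (purely illustrative, not Nova's rpc layer):

    class ToyManager(object):
        def set_network_host(self, context, network_id):
            return 'host-for-%s' % network_id

    def toy_dispatch(manager, context, envelope):
        # look the method up by name and apply the 'args' dict as kwargs
        method = getattr(manager, envelope['method'])
        return method(context, **envelope['args'])

    host = toy_dispatch(ToyManager(), None,
                        {'method': 'set_network_host',
                         'args': {'network_id': 7}})
    assert host == 'host-for-7'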
Example #51
    def delete_volume(self, context, volume_id):
        """Deletes and unexports volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        if volume_ref['attach_status'] == "attached":
            raise exception.Error(_("Volume is still attached"))
        if volume_ref['host'] != self.host:
            raise exception.Error(_("Volume is not local to this node"))

        try:
            LOG.debug(_("volume %s: removing export"), volume_ref['name'])
            self.driver.remove_export(context, volume_ref)
            LOG.debug(_("volume %s: deleting"), volume_ref['name'])
            self.driver.delete_volume(volume_ref)
        except exception.VolumeIsBusy:
            LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
            self.driver.ensure_export(context, volume_ref)
            self.db.volume_update(context, volume_ref['id'],
                                  {'status': 'available'})
            return True

        self.db.volume_destroy(context, volume_id)
        LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
        return True
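Note the busy path: instead of failing the delete, the handler restores the export and resets the status to 'available' so the operation can simply be retried later. A compressed, hypothetical sketch of that recover-and-retry shape (the exception and driver here are stand-ins):

    class VolumeIsBusy(Exception):
        pass

    class StubDriver(object):
        def delete_volume(self, volume):
            raise VolumeIsBusy()

        def ensure_export(self, volume):
            volume['exported'] = True

    def delete_with_busy_fallback(driver, volume):
        try:
            driver.delete_volume(volume)
        except VolumeIsBusy:
            # a client still holds the volume: undo the teardown and
            # leave it usable so the delete can be retried later
            driver.ensure_export(volume)
            volume['status'] = 'available'
            return True
        volume['status'] = 'deleted'
        return True

    vol = {'status': 'deleting'}
    delete_with_busy_fallback(StubDriver(), vol)
    assert vol['status'] == 'available'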
Example #52
    def _create_migration(self, context, instance, instance_type):
        """Create a migration record for the upcoming resize.  This should
        be done while the COMPUTE_RESOURCES_SEMAPHORE is held so the resource
        claim will not be lost if the audit process starts.
        """
        # TODO(russellb): no-db-compute: Send the old instance type
        # info that is needed via rpc so db access isn't required
        # here.
        old_instance_type_id = instance['instance_type_id']
        old_instance_type = instance_types.get_instance_type(
                old_instance_type_id)

        return db.migration_create(context.elevated(),
                {'instance_uuid': instance['uuid'],
                 'source_compute': instance['host'],
                 'dest_compute': self.host,
                 'dest_host': self.driver.get_host_ip_addr(),
                 'old_instance_type_id': old_instance_type['id'],
                 'new_instance_type_id': instance_type['id'],
                 'status': 'pre-migrating'})
Example #53
    def allocate_for_instance(self, context, **kwargs):
        """Handles allocating the floating IP resources for an instance.

        calls super class allocate_for_instance() as well

        rpc.called by network_api
        """
        instance_uuid = kwargs.get('instance_id')
        if not uuidutils.is_uuid_like(instance_uuid):
            instance_uuid = kwargs.get('instance_uuid')
        project_id = kwargs.get('project_id')
        # call the next inherited class's allocate_for_instance()
        # which is currently the NetworkManager version
        # do this first so fixed ip is already allocated
        nw_info = super(FloatingIP,
                        self).allocate_for_instance(context, **kwargs)
        if CONF.auto_assign_floating_ip:
            context = context.elevated()
            # allocate a floating ip
            floating_address = self.allocate_floating_ip(
                context, project_id, True)
            LOG.debug("floating IP allocation for instance "
                      "|%s|",
                      floating_address,
                      instance_uuid=instance_uuid,
                      context=context)

            # get the first fixed address belonging to the instance
            fixed_ips = nw_info.fixed_ips()
            fixed_address = fixed_ips[0]['address']

            # associate the floating ip to fixed_ip
            self.associate_floating_ip(context,
                                       floating_address,
                                       fixed_address,
                                       affect_auto_assigned=True)

            # create a fresh set of network info that contains the floating ip
            nw_info = self.get_instance_nw_info(context, **kwargs)

        return nw_info
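The ordering matters: the superclass call must allocate the fixed IP first, then a floating IP is allocated, associated with the first fixed address, and the network info is rebuilt so the cache reflects the association. A condensed stand-in for that control flow (every callable below is a hypothetical stub):

    def allocate_with_auto_float(alloc_fixed, alloc_floating, associate,
                                 rebuild_nw_info, auto_assign=True):
        nw_info = alloc_fixed()               # 1. fixed IP first (superclass)
        if auto_assign:
            floating = alloc_floating()       # 2. allocate a floating IP
            fixed = nw_info['fixed_ips'][0]   # 3. first fixed address
            associate(floating, fixed)        # 4. bind floating to fixed
            nw_info = rebuild_nw_info()       # 5. refresh cached network info
        return nw_info

    calls = []
    nw = allocate_with_auto_float(
        lambda: {'fixed_ips': ['192.168.0.3']},
        lambda: '198.51.100.7',
        lambda f, x: calls.append((f, x)),
        lambda: {'fixed_ips': ['192.168.0.3'], 'floating': ['198.51.100.7']})
    assert calls == [('198.51.100.7', '192.168.0.3')]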
Example #54
    def get_network(self, context, proj_pair):
        (quantum_net_id, net_tenant_id) = proj_pair

        net_tenant_id = net_tenant_id or FLAGS.quantum_default_tenant_id
        # FIXME(danwent): We'd like to have the manager be
        # completely decoupled from the nova networks table.
        # However, other parts of nova sometimes go behind our
        # back and access network data directly from the DB.  So
        # for now, the quantum manager knows that there is a nova
        # networks DB table and accesses it here.  updating the
        # virtual_interfaces table to use UUIDs would be one
        # solution, but this would require significant work
        # elsewhere.
        admin_context = context.elevated()

        # We may not be able to get a network_ref here if this network
        # isn't in the database (i.e. it came from Quantum).
        network_ref = db.network_get_by_uuid(admin_context, quantum_net_id)

        if network_ref is None:
            network_ref = {
                "uuid": quantum_net_id,
                "project_id": net_tenant_id,
                # NOTE(bgh): We need to document this somewhere but since
                # we don't know the priority of any networks we get from
                # quantum we just give them a priority of 0.  If its
                # necessary to specify the order of the vifs and what
                # network they map to then the user will have to use the
                # OSCreateServer extension and specify them explicitly.
                #
                # In the future users will be able to tag quantum networks
                # with a priority .. and at that point we can update the
                # code here to reflect that.
                "priority": 0,
                "id": 'NULL',
                "label": "quantum-net-%s" % quantum_net_id
            }
        network_ref['net_tenant_id'] = net_tenant_id
        network_ref['quantum_net_id'] = quantum_net_id
        return network_ref
Example #55
    def allocate_for_instance(self, context, **kwargs):
        """Handles allocating the various network resources for an instance.

        rpc.called by network_api
        """
        instance_id = kwargs.pop('instance_id')
        host = kwargs.pop('host')
        project_id = kwargs.pop('project_id')
        type_id = kwargs.pop('instance_type_id')
        vpn = kwargs.pop('vpn')
        admin_context = context.elevated()
        LOG.debug(_("network allocations for instance %s"), instance_id,
                                                            context=context)
        networks = self._get_networks_for_instance(admin_context, instance_id,
                                                                  project_id)
        LOG.warn(networks)
        self._allocate_mac_addresses(context, instance_id, networks)
        self._allocate_fixed_ips(admin_context, instance_id, host, networks,
                                 vpn=vpn)
        self._allocate_dns_entry(admin_context, instance_id, networks, vpn=vpn)
        return self.get_instance_nw_info(context, instance_id, type_id, host)
Example #56
    def deallocate_for_instance(self, context, **kwargs):
        """Called when a VM is terminated.  Loop through each virtual
           interface in the Nova DB and remove the Quantum port and
           clear the IP allocation using the IPAM.  Finally, remove
           the virtual interfaces from the Nova DB.
        """
        instance_id = kwargs.get('instance_id')
        project_id = kwargs.pop('project_id', None)

        admin_context = context.elevated()
        vifs = db.virtual_interface_get_by_instance(admin_context, instance_id)

        for vif in vifs:
            network = db.network_get(admin_context, vif['network_id'])

            self.deallocate_port(vif['uuid'], network['uuid'], project_id,
                                 instance_id)

            ipam_tenant_id = self.deallocate_ip_address(
                context, network['uuid'], project_id, vif, instance_id)

            if FLAGS.quantum_use_dhcp:
                if network['host'] == self.host:
                    self.update_dhcp(context, ipam_tenant_id, network, vif,
                                     project_id)
                else:
                    topic = self.db.queue_get_for(context, FLAGS.network_topic,
                                                  network['host'])
                    rpc.call(
                        context, topic, {
                            'method': 'update_dhcp',
                            'args': {
                                'ipam_tenant_id': ipam_tenant_id,
                                'network_ref': network,
                                'vif_ref': vif,
                                'project_id': network['project_id']
                            }
                        })

            db.virtual_interface_delete(admin_context, vif['id'])
Example #57
    def delete_volume(self, context, volume_id):
        """Deletes and unexports volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        if volume_ref['attach_status'] == "attached":
            raise exception.Error(_("Volume is still attached"))
        if volume_ref['host'] != self.host:
            raise exception.Error(_("Volume is not local to this node"))

        try:
            LOG.debug(_("volume %s: removing export"), volume_ref['name'])
            self.driver.remove_export(context, volume_ref)
            LOG.debug(_("volume %s: deleting"), volume_ref['name'])
            self.driver.delete_volume(volume_ref)
        except Exception:
            self.db.volume_update(context, volume_ref['id'],
                                  {'status': 'error_deleting'})
            raise

        self.db.volume_destroy(context, volume_id)
        LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
        return True
Example #58
    def resize_claim(self, context, instance_ref, instance_type, limits=None):
        """Indicate that resources are needed for a resize operation to this
        compute host.
        :param context: security context
        :param instance_ref: instance to reserve resources for
        :param instance_type: new instance_type being resized to
        :param limits: Dict of oversubscription limits for memory, disk,
                       and CPUs.
        :returns: A Claim ticket representing the reserved resources.  This
                  should be turned into finalize  a resource claim or free
                  resources after the compute operation is finished.
        """
        if self.disabled:
            # compute_driver doesn't support resource tracking, just
            # generate the migration record and continue the resize:
            migration_ref = self._create_migration(context, instance_ref,
                                                   instance_type)
            return claims.NopClaim(migration=migration_ref)

        claim = claims.ResizeClaim(instance_ref, instance_type, self)

        if claim.test(self.compute_node, limits):

            migration_ref = self._create_migration(context, instance_ref,
                                                   instance_type)
            claim.migration = migration_ref

            # Mark the resources in-use for the resize landing on this
            # compute host:
            self._update_usage_from_migration(context, instance_ref,
                                              self.compute_node, migration_ref)
            elevated = context.elevated()
            self._update(elevated, self.compute_node)

            return claim

        else:
            raise exception.ComputeResourcesUnavailable()
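The claim protocol in play: test whether the requested resources fit under the free capacity and any oversubscription limit; on success record the migration and account the usage, otherwise raise. When tracking is disabled, a no-op claim keeps the caller's interface uniform. A stripped-down sketch of the two claim shapes (hypothetical classes, not Nova's):

    class NopClaim(object):
        # degenerate claim returned when resource tracking is disabled
        def __init__(self, migration=None):
            self.migration = migration

    class SimpleClaim(object):
        # toy stand-in: a claim over a single memory figure
        def __init__(self, requested_mb):
            self.requested_mb = requested_mb
            self.migration = None

        def test(self, free_mb, limit_mb=None):
            ceiling = free_mb if limit_mb is None else limit_mb
            return self.requested_mb <= min(free_mb, ceiling)

    claim = SimpleClaim(requested_mb=2048)
    assert claim.test(free_mb=4096)                      # fits
    assert not claim.test(free_mb=4096, limit_mb=1024)   # capped by limit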
Example #59
 def _describe_availability_zones(self, context, **kwargs):
     ctxt = context.elevated()
     enabled_services = db.service_get_all(ctxt, False)
     disabled_services = db.service_get_all(ctxt, True)
     available_zones = []
     for zone in [
             service.availability_zone for service in enabled_services
     ]:
          if zone not in available_zones:
             available_zones.append(zone)
     not_available_zones = []
     for zone in [
             service.availability_zone for service in disabled_services
             if not service['availability_zone'] in available_zones
     ]:
          if zone not in not_available_zones:
             not_available_zones.append(zone)
     result = []
     for zone in available_zones:
         result.append({'zoneName': zone, 'zoneState': "available"})
     for zone in not_available_zones:
         result.append({'zoneName': zone, 'zoneState': "not available"})
     return {'availabilityZoneInfo': result}
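The two de-duplicating loops preserve first-seen order; the same zone report can be computed with sets at the cost of an explicit sort for stable output. An equivalent sketch on plain data, assuming the order of the result rows is not significant:

    enabled = ['nova', 'nova', 'az1']
    disabled = ['az1', 'az2']

    available = sorted(set(enabled))
    not_available = sorted(set(disabled) - set(available))

    result = ([{'zoneName': z, 'zoneState': 'available'} for z in available] +
              [{'zoneName': z, 'zoneState': 'not available'}
               for z in not_available])
    assert {'zoneName': 'az2', 'zoneState': 'not available'} in result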
Example #60
    def create_volume(self, context, volume_id):
        """Creates and exports the volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        LOG.info(_("volume %s: creating"), volume_ref['name'])

        self.db.volume_update(context, volume_id, {'host': self.host})
        # NOTE(vish): so we don't have to get volume from db again
        #             before passing it to the driver.
        volume_ref['host'] = self.host

        try:
            vol_name = volume_ref['name']
            vol_size = volume_ref['size']
            LOG.debug(
                _("volume %(vol_name)s: creating lv of"
                  " size %(vol_size)sG") % locals())
            model_update = self.driver.create_volume(volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)

            LOG.debug(_("volume %s: creating export"), volume_ref['name'])
            model_update = self.driver.create_export(context, volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)
        except Exception:
            self.db.volume_update(context, volume_ref['id'],
                                  {'status': 'error'})
            raise

        now = datetime.datetime.utcnow()
        self.db.volume_update(context, volume_ref['id'], {
            'status': 'available',
            'launched_at': now
        })
        LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
        return volume_id