Example #1
    def create_snapshot(self, context, volume_id, snapshot_id):
        """Creates and exports the snapshot."""
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        LOG.info(_("snapshot %s: creating"), snapshot_ref['name'])

        try:
            snap_name = snapshot_ref['name']
            LOG.debug(_("snapshot %(snap_name)s: creating") % locals())
            model_update = self.driver.create_snapshot(snapshot_ref)
            if model_update:
                self.db.snapshot_update(context, snapshot_ref['id'],
                                        model_update)

        except Exception:
            with utils.save_and_reraise_exception():
                self.db.snapshot_update(context,
                                        snapshot_ref['id'],
                                        {'status': 'error'})

        self.db.snapshot_update(context,
                                snapshot_ref['id'], {'status': 'available',
                                                     'progress': '100%'})
        LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name'])
        return snapshot_id
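
Every example on this page elevates the request context before touching the database. In Nova-derived codebases, context.elevated() conventionally returns a copy of the request context with the admin flag switched on, so the follow-up DB calls bypass project-level scoping. A minimal sketch of that behavior (attribute names assumed, not this project's exact implementation):

    import copy

    class RequestContext(object):
        """Carries who is making a request and with what rights."""

        def __init__(self, user_id, project_id, is_admin=False):
            self.user_id = user_id
            self.project_id = project_id
            self.is_admin = is_admin

        def elevated(self):
            # Return an admin-capable copy; the original context keeps
            # its own rights, so elevation is scoped to the copy.
            context = copy.copy(self)
            context.is_admin = True
            return context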
Example #2
    def _provision_volume(self, context, vol, vsa_id, availability_zone):

        if availability_zone is None:
            availability_zone = FLAGS.storage_availability_zone

        now = utils.utcnow()
        options = {
            'size': vol['size'],
            'user_id': context.user_id,
            'project_id': context.project_id,
            'snapshot_id': None,
            'availability_zone': availability_zone,
            'status': "creating",
            'attach_status': "detached",
            'display_name': vol['name'],
            'display_description': vol['description'],
            'volume_type_id': vol['volume_type_id'],
            'metadata': dict(to_vsa_id=vsa_id),
        }

        size = vol['size']
        host = vol['host']
        name = vol['name']
        LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
                    "host %(host)s"), locals())

        volume_ref = db.volume_create(context.elevated(), options)
        driver.cast_to_volume_host(context,
                                   vol['host'],
                                   'create_volume',
                                   volume_id=volume_ref['id'],
                                   snapshot_id=None)
Example #3
    def _provision_volume(self, context, vol, vsa_id, availability_zone):

        if availability_zone is None:
            availability_zone = FLAGS.storage_availability_zone

        now = utils.utcnow()
        options = {
            'size': vol['size'],
            'user_id': context.user_id,
            'project_id': context.project_id,
            'snapshot_id': None,
            'availability_zone': availability_zone,
            'status': "creating",
            'attach_status': "detached",
            'display_name': vol['name'],
            'display_description': vol['description'],
            'volume_type_id': vol['volume_type_id'],
            'metadata': dict(to_vsa_id=vsa_id),
            }

        size = vol['size']
        host = vol['host']
        name = vol['name']
        LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
                    "host %(host)s"), locals())

        volume_ref = db.volume_create(context.elevated(), options)
        driver.cast_to_volume_host(context, vol['host'],
                'create_volume', volume_id=volume_ref['id'],
                snapshot_id=None)
Example #4
    def create_snapshot(self, context, volume_id, snapshot_id):
        """Creates and exports the snapshot."""
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)
        LOG.info(_("snapshot %s: creating"), snapshot_ref['name'])

        try:
            snap_name = snapshot_ref['name']
            LOG.debug(_("snapshot %(snap_name)s: creating") % locals())
            model_update = self.driver.create_snapshot(snapshot_ref)
            if model_update:
                self.db.snapshot_update(context, snapshot_ref['id'],
                                        model_update)

        except Exception:
            with utils.save_and_reraise_exception():
                self.db.snapshot_update(context, snapshot_ref['id'],
                                        {'status': 'error'})

        self.db.snapshot_update(context, snapshot_ref['id'], {
            'status': 'available',
            'progress': '100%'
        })
        LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name'])
        return snapshot_id
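
Examples #1 and #4 run their error-path DB update inside utils.save_and_reraise_exception(). That helper is not shown on this page; a simplified stand-in (not the project's exact implementation) that captures the idea:

    import contextlib
    import logging
    import sys

    LOG = logging.getLogger(__name__)

    @contextlib.contextmanager
    def save_and_reraise_exception():
        # Must be entered from inside an ``except`` block, so that
        # sys.exc_info() still describes the in-flight exception.
        exc_type, exc_value, exc_tb = sys.exc_info()
        try:
            yield
        except Exception:
            # The cleanup body itself failed; surface that error, but
            # record the original exception we are dropping.
            LOG.exception("original exception being dropped: %s", exc_value)
            raise
        # Cleanup succeeded: re-raise whatever brought us here.
        raise exc_value.with_traceback(exc_tb)

Used this way, marking the snapshot 'error' cannot silently swallow the exception that triggered the handler.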
Example #5
    def _check_host_enforcement(self, context, availability_zone):
        if (availability_zone and ':' in availability_zone
                and context.is_admin):
            zone, _x, host = availability_zone.partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'engine-volume')
            if not self.service_is_up(service):
                raise exception.WillNotSchedule(host=host)

            return host
        else:
            return None
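
The availability zone here doubles as an admin-only host override: a value of the form 'zone:host' pins the volume to that host. str.partition splits on the first separator only, so anything after the first colon is kept whole (values below hypothetical):

    >>> zone, _sep, host = 'nova:volume-host-1'.partition(':')
    >>> (zone, host)
    ('nova', 'volume-host-1')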
Example #6
 def get_dhcp_hosts_text(self, context, subnet_id, project_id=None):
     ips = self.ipam.get_allocated_ips(context, subnet_id, project_id)
     hosts_text = ""
     admin_context = context.elevated()
     for ip in ips:
         address, vif_id = ip
         vif = db.virtual_interface_get_by_uuid(admin_context, vif_id)
         mac_address = vif['address']
         text = "%s,%s.%s,%s\n" % (mac_address, "host-" + address,
             FLAGS.dhcp_domain, address)
         hosts_text += text
     LOG.debug("DHCP hosts: %s" % hosts_text)
     return hosts_text
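
Each emitted line follows the MAC,hostname,IP layout that dnsmasq accepts for its hosts file. Reconstructing one line from the format string above, with hypothetical values and dhcp_domain set to 'novalocal':

    >>> mac_address, address, dhcp_domain = (
    ...     'fa:16:3e:aa:bb:cc', '10.0.0.2', 'novalocal')
    >>> '%s,%s.%s,%s' % (mac_address, 'host-' + address, dhcp_domain, address)
    'fa:16:3e:aa:bb:cc,host-10.0.0.2.novalocal,10.0.0.2'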
Example #7
    def _check_host_enforcement(self, context, availability_zone):
        if (availability_zone
            and ':' in availability_zone
            and context.is_admin):
            zone, _x, host = availability_zone.partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'engine-volume')
            if not self.service_is_up(service):
                raise exception.WillNotSchedule(host=host)

            return host
        else:
            return None
Example #8
    def deallocate_for_instance(self, context, **kwargs):
        """Called when a VM is terminated.  Loop through each virtual
           interface in the Engine DB and remove the Quantum port and
           clear the IP allocation using the IPAM.  Finally, remove
           the virtual interfaces from the Engine DB.
        """
        instance_id = kwargs.get('instance_id')
        project_id = kwargs.pop('project_id', None)

        admin_context = context.elevated()
        vifs = db.virtual_interface_get_by_instance(admin_context,
                                                    instance_id)
        for vif_ref in vifs:
            interface_id = vif_ref['uuid']
            q_tenant_id = project_id

            network_ref = db.network_get(admin_context, vif_ref['network_id'])
            net_id = network_ref['uuid']

            port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
                                                         net_id, interface_id)
            if not port_id:
                q_tenant_id = FLAGS.quantum_default_tenant_id
                port_id = self.q_conn.get_port_by_attachment(
                    q_tenant_id, net_id, interface_id)

            if not port_id:
                LOG.error("Unable to find port with attachment: %s" %
                          (interface_id))
            else:
                self.q_conn.detach_and_delete_port(q_tenant_id,
                                                   net_id, port_id)

            ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
                net_id, vif_ref['uuid'], project_id)

            self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
                                            net_id, vif_ref)

            # If DHCP is enabled on this network then we need to update the
            # leases and restart the server.
            if FLAGS.quantum_use_dhcp:
                self.update_dhcp(context, ipam_tenant_id, network_ref, vif_ref,
                    project_id)
        try:
            db.virtual_interface_delete_by_instance(admin_context,
                                                    instance_id)
        except exception.InstanceNotFound:
            LOG.error(_("Attempted to deallocate non-existent instance: %s" %
                        (instance_id)))
Example #9
    def delete_snapshot(self, context, snapshot_id):
        """Deletes and unexports snapshot."""
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)

        try:
            LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name'])
            self.driver.delete_snapshot(snapshot_ref)
        except Exception:
            with utils.save_and_reraise_exception():
                self.db.snapshot_update(context, snapshot_ref['id'],
                                        {'status': 'error_deleting'})

        self.db.snapshot_destroy(context, snapshot_id)
        LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
        return True
Example #10
    def delete_snapshot(self, context, snapshot_id):
        """Deletes and unexports snapshot."""
        context = context.elevated()
        snapshot_ref = self.db.snapshot_get(context, snapshot_id)

        try:
            LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name'])
            self.driver.delete_snapshot(snapshot_ref)
        except Exception:
            with utils.save_and_reraise_exception():
                self.db.snapshot_update(context,
                                        snapshot_ref['id'],
                                        {'status': 'error_deleting'})

        self.db.snapshot_destroy(context, snapshot_id)
        LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
        return True
Example #11
    def create_volume(self, context, volume_id, snapshot_id=None):
        """Creates and exports the volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        LOG.info(_("volume %s: creating"), volume_ref['name'])

        self.db.volume_update(context,
                              volume_id,
                              {'host': self.host})
        # NOTE(vish): so we don't have to get volume from db again
        #             before passing it to the driver.
        volume_ref['host'] = self.host

        try:
            vol_name = volume_ref['name']
            vol_size = volume_ref['size']
            LOG.debug(_("volume %(vol_name)s: creating lv of"
                    " size %(vol_size)sG") % locals())
            if snapshot_id is None:
                model_update = self.driver.create_volume(volume_ref)
            else:
                snapshot_ref = self.db.snapshot_get(context, snapshot_id)
                model_update = self.driver.create_volume_from_snapshot(
                    volume_ref,
                    snapshot_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)

            LOG.debug(_("volume %s: creating export"), volume_ref['name'])
            model_update = self.driver.create_export(context, volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)
        except Exception:
            with utils.save_and_reraise_exception():
                self.db.volume_update(context,
                                      volume_ref['id'], {'status': 'error'})
                self._notify_vsa(context, volume_ref, 'error')

        now = utils.utcnow()
        self.db.volume_update(context,
                              volume_ref['id'], {'status': 'available',
                                                 'launched_at': now})
        LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
        self._notify_vsa(context, volume_ref, 'available')
        self._reset_stats()
        return volume_id
Example #12
 def get_dhcp_leases(self, context, network_ref):
     """Return a network's hosts config in dnsmasq leasefile format."""
     subnet_id = network_ref['uuid']
     project_id = network_ref['project_id']
     ips = self.ipam.get_allocated_ips(context, subnet_id, project_id)
     leases_text = ""
     admin_context = context.elevated()
     for ip in ips:
         address, vif_id = ip
         vif = db.virtual_interface_get_by_uuid(admin_context, vif_id)
         mac_address = vif['address']
         text = "%s %s %s %s *\n" % \
             (int(time.time()) - FLAGS.dhcp_lease_time,
              mac_address, address, '*')
         leases_text += text
     LOG.debug("DHCP leases: %s" % leases_text)
     return leases_text
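
The lease lines mirror dnsmasq's leasefile layout: expiry time, MAC, IP, hostname, client-id. Note that the expiry is computed as now minus dhcp_lease_time, i.e. already in the past. Reconstructing one line with a fixed, hypothetical timestamp:

    >>> now = 1300000120          # hypothetical epoch seconds
    >>> dhcp_lease_time = 120
    >>> '%s %s %s %s *' % (now - dhcp_lease_time,
    ...                    'fa:16:3e:aa:bb:cc', '10.0.0.2', '*')
    '1300000000 fa:16:3e:aa:bb:cc 10.0.0.2 * *'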
Example #13
    def create_volume(self, context, volume_id, snapshot_id=None):
        """Creates and exports the volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        LOG.info(_("volume %s: creating"), volume_ref['name'])

        self.db.volume_update(context, volume_id, {'host': self.host})
        # NOTE(vish): so we don't have to get volume from db again
        #             before passing it to the driver.
        volume_ref['host'] = self.host

        try:
            vol_name = volume_ref['name']
            vol_size = volume_ref['size']
            LOG.debug(
                _("volume %(vol_name)s: creating lv of"
                  " size %(vol_size)sG") % locals())
            if snapshot_id is None:
                model_update = self.driver.create_volume(volume_ref)
            else:
                snapshot_ref = self.db.snapshot_get(context, snapshot_id)
                model_update = self.driver.create_volume_from_snapshot(
                    volume_ref, snapshot_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)

            LOG.debug(_("volume %s: creating export"), volume_ref['name'])
            model_update = self.driver.create_export(context, volume_ref)
            if model_update:
                self.db.volume_update(context, volume_ref['id'], model_update)
        except Exception:
            with utils.save_and_reraise_exception():
                self.db.volume_update(context, volume_ref['id'],
                                      {'status': 'error'})
                self._notify_vsa(context, volume_ref, 'error')

        now = utils.utcnow()
        self.db.volume_update(context, volume_ref['id'], {
            'status': 'available',
            'launched_at': now
        })
        LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
        self._notify_vsa(context, volume_ref, 'available')
        self._reset_stats()
        return volume_id
Example #14
    def delete_volume(self, context, volume_id):
        """Deletes and unexports volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        if volume_ref['attach_status'] == "attached":
            raise exception.Error(_("Volume is still attached"))
        if volume_ref['host'] != self.host:
            raise exception.Error(_("Volume is not local to this node"))

        self._reset_stats()
        try:
            LOG.debug(_("volume %s: removing export"), volume_ref['name'])
            self.driver.remove_export(context, volume_ref)
            LOG.debug(_("volume %s: deleting"), volume_ref['name'])
            self.driver.delete_volume(volume_ref)
        except exception.VolumeIsBusy as e:
            LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
            self.driver.ensure_export(context, volume_ref)
            self.db.volume_update(context, volume_ref['id'],
                                  {'status': 'available'})
            return True
Example #15
    def delete_volume(self, context, volume_id):
        """Deletes and unexports volume."""
        context = context.elevated()
        volume_ref = self.db.volume_get(context, volume_id)
        if volume_ref['attach_status'] == "attached":
            raise exception.Error(_("Volume is still attached"))
        if volume_ref['host'] != self.host:
            raise exception.Error(_("Volume is not local to this node"))

        self._reset_stats()
        try:
            LOG.debug(_("volume %s: removing export"), volume_ref['name'])
            self.driver.remove_export(context, volume_ref)
            LOG.debug(_("volume %s: deleting"), volume_ref['name'])
            self.driver.delete_volume(volume_ref)
        except exception.VolumeIsBusy as e:
            LOG.debug(_("volume %s: volume is busy"), volume_ref['name'])
            self.driver.ensure_export(context, volume_ref)
            self.db.volume_update(context, volume_ref['id'],
                                  {'status': 'available'})
            return True
Example #16
 def setup_console(self, context, console):
     """Sets up actual proxies."""
     self._rebuild_xvp_conf(context.elevated())
Example #17
 def teardown_console(self, context, console):
     """Tears down actual proxies."""
     self._rebuild_xvp_conf(context.elevated())
Example #18
File: xvp.py Project: wendy-king/x7_venv
 def setup_console(self, context, console):
     """Sets up actual proxies."""
     self._rebuild_xvp_conf(context.elevated())
Example #19
    def get_instance_nw_info(self, context, instance_id,
                                instance_type_id, host):
        """This method is used by compute to fetch all network data
           that should be used when creating the VM.

           The method simply loops through all virtual interfaces
           stored in the engine DB and queries the IPAM lib to get
           the associated IP data.

           The format of returned data is 'defined' by the initial
           set of NetworkManagers found in engine/network/manager.py.
           Ideally this 'interface' will be more formally defined
           in the future.
        """
        network_info = []
        instance = db.instance_get(context, instance_id)
        project_id = instance.project_id

        admin_context = context.elevated()
        vifs = db.virtual_interface_get_by_instance(admin_context,
                                                    instance_id)
        for vif in vifs:
            net = db.network_get(admin_context, vif['network_id'])
            net_id = net['uuid']

            if not net_id:
                # TODO(bgh): We need to figure out a way to tell if we
                # should actually be raising this exception or not.
                # In the case that a VM spawn failed it may not have
                # attached the vif and raising the exception here
                # prevents deletion of the VM.  In that case we should
                # probably just log, continue, and move on.
                raise Exception(_("No network for virtual interface %s") %
                                vif['uuid'])

            ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
                net_id, vif['uuid'], project_id)
            v4_subnet, v6_subnet = \
                    self.ipam.get_subnets_by_net_id(context,
                            ipam_tenant_id, net_id, vif['uuid'])

            v4_ips = self.ipam.get_v4_ips_by_interface(context,
                                        net_id, vif['uuid'],
                                        project_id=ipam_tenant_id)
            v6_ips = self.ipam.get_v6_ips_by_interface(context,
                                        net_id, vif['uuid'],
                                        project_id=ipam_tenant_id)

            def ip_dict(ip, subnet):
                return {
                    "ip": ip,
                    "netmask": subnet["netmask"],
                    "enabled": "1"}

            network_dict = {
                'cidr': v4_subnet['cidr'],
                'injected': True,
                'bridge': net['bridge'],
                'multi_host': False}

            q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
            info = {
                'label': self.q_conn.get_network_name(q_tenant_id, net_id),
                'gateway': v4_subnet['gateway'],
                'dhcp_server': v4_subnet['gateway'],
                'broadcast': v4_subnet['broadcast'],
                'mac': vif['address'],
                'vif_uuid': vif['uuid'],
                'dns': [],
                'ips': [ip_dict(ip, v4_subnet) for ip in v4_ips]}

            if v6_subnet:
                if v6_subnet['cidr']:
                    network_dict['cidr_v6'] = v6_subnet['cidr']
                    info['ip6s'] = [ip_dict(ip, v6_subnet) for ip in v6_ips]

                if v6_subnet['gateway']:
                    info['gateway_v6'] = v6_subnet['gateway']

            dns_dict = {}
            for s in [v4_subnet, v6_subnet]:
                for k in ['dns1', 'dns2']:
                    if s and s[k]:
                        dns_dict[s[k]] = None
            info['dns'] = [d for d in dns_dict.keys()]

            network_info.append((network_dict, info))
        return network_info
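
For reference, one entry in the returned network_info list is a (network, info) pair. With hypothetical values, an IPv4-only entry built by the code above would look roughly like:

    network_dict = {
        'cidr': '10.0.0.0/24',
        'injected': True,
        'bridge': 'br100',
        'multi_host': False,
    }
    info = {
        'label': 'private',
        'gateway': '10.0.0.1',
        'dhcp_server': '10.0.0.1',
        'broadcast': '10.0.0.255',
        'mac': 'fa:16:3e:aa:bb:cc',
        'vif_uuid': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
        'dns': ['8.8.8.8'],
        'ips': [{'ip': '10.0.0.2',
                 'netmask': '255.255.255.0',
                 'enabled': '1'}],
    }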
Example #20
File: xvp.py Project: wendy-king/x7_venv
 def teardown_console(self, context, console):
     """Tears down actual proxies."""
     self._rebuild_xvp_conf(context.elevated())
Example #21
    def allocate_for_instance(self, context, **kwargs):
        """Called by compute when it is creating a new VM.

           There are three key tasks:
                - Determine the number and order of vNICs to create
                - Allocate IP addresses
                - Create ports on a Quantum network and attach vNICs.

           We support two approaches to determining vNICs:
                - By default, a VM gets a vNIC for any network belonging
                  to the VM's project, and a vNIC for any "global" network
                  that has a NULL project_id.  vNIC order is determined
                  by the network's 'priority' field.
                - If the 'os-create-server-ext' extension was used to create
                  the VM, only the networks in 'requested_networks' are used
                  to create vNICs, and the vNIC order is determined by the
                  order in the requested_networks array.

           For each vNIC, use the FlatManager to create the entries
           in the virtual_interfaces table, contact Quantum to
           create a port and attach the vNIC, and use the IPAM
           lib to allocate IP addresses.
        """
        instance_id = kwargs.pop('instance_id')
        instance_type_id = kwargs['instance_type_id']
        host = kwargs.pop('host')
        project_id = kwargs.pop('project_id')
        LOG.debug(_("network allocations for instance %s"), instance_id)

        requested_networks = kwargs.get('requested_networks')

        if requested_networks:
            net_proj_pairs = [(net_id, project_id) \
                for (net_id, _i) in requested_networks]
        else:
            net_proj_pairs = self.ipam.get_project_and_global_net_ids(context,
                                                                project_id)

        # Quantum may also know about networks that aren't in the networks
        # table so we need to query Quantum for any tenant networks and add
        # them to net_proj_pairs.
        qnets = self.q_conn.get_networks(project_id)
        for qn in qnets['networks']:
            pair = (qn['id'], project_id)
            if pair not in net_proj_pairs:
                net_proj_pairs.append(pair)

        # Create a port via quantum and attach the vif
        for (quantum_net_id, project_id) in net_proj_pairs:
            # FIXME(danwent): We'd like to have the manager be
            # completely decoupled from the engine networks table.
            # However, other parts of engine sometimes go behind our
            # back and access network data directly from the DB.  So
            # for now, the quantum manager knows that there is an engine
            # networks DB table and accesses it here.  Updating the
            # virtual_interfaces table to use UUIDs would be one
            # solution, but this would require significant work
            # elsewhere.
            admin_context = context.elevated()

            # We may not be able to get a network_ref here if this network
            # isn't in the database (i.e. it came from Quantum).
            network_ref = db.network_get_by_uuid(admin_context,
                                                 quantum_net_id)
            if network_ref is None:
                network_ref = {"uuid": quantum_net_id,
                    "project_id": project_id,
                    # NOTE(bgh): We need to document this somewhere but since
                    # we don't know the priority of any networks we get from
                    # quantum we just give them a priority of 0.  If it's
                    # necessary to specify the order of the vifs and what
                    # network they map to then the user will have to use the
                    # OSCreateServer extension and specify them explicitly.
                    #
                    # In the future users will be able to tag quantum networks
                    # with a priority, and at that point we can update the
                    # code here to reflect that.
                    "priority": 0,
                    "id": 'NULL',
                    "label": "quantum-net-%s" % quantum_net_id}

            vif_rec = self.add_virtual_interface(context,
                                                 instance_id,
                                                 network_ref['id'])

            # talk to Quantum API to create and attach port.
            instance = db.instance_get(context, instance_id)
            instance_type = instance_types.get_instance_type(instance_type_id)
            rxtx_factor = instance_type['rxtx_factor']
            engine_id = self._get_engine_id(context)
            q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
            self.q_conn.create_and_attach_port(q_tenant_id, quantum_net_id,
                                               vif_rec['uuid'],
                                               vm_id=instance['uuid'],
                                               rxtx_factor=rxtx_factor,
                                               engine_id=engine_id)
            # Tell melange to allocate an IP
            ip = self.ipam.allocate_fixed_ip(context, project_id,
                    quantum_net_id, vif_rec)
            # Set up/start the dhcp server for this network if necessary
            if FLAGS.quantum_use_dhcp:
                self.enable_dhcp(context, quantum_net_id, network_ref,
                    vif_rec, project_id)
        return self.get_instance_nw_info(context, instance_id,
                                         instance_type_id, host)
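
When requested_networks is supplied, the list comprehension near the top keeps only the network IDs, tags each with the caller's project, and preserves the caller's ordering, which is what ultimately fixes the vNIC order. With hypothetical values:

    >>> project_id = 'my-project'
    >>> requested_networks = [('net-uuid-2', None), ('net-uuid-1', '10.0.0.5')]
    >>> [(net_id, project_id) for (net_id, _i) in requested_networks]
    [('net-uuid-2', 'my-project'), ('net-uuid-1', 'my-project')]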