Example #1
 def provide_session_for_this_resource(self, session):
     if session is None:
         session = object_session(self)
     if session is None:
         session = db_session.get_session()
         session.add(self)
     return session
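
The method above resolves a session in three steps: use the one passed in; otherwise use the session the object is already attached to (SQLAlchemy's object_session); otherwise create a fresh session and add the object to it. A self-contained sketch of the same pattern, assuming SQLAlchemy 1.4+ (the Resource model and in-memory engine are illustrative):

from sqlalchemy import Column, Integer, create_engine
from sqlalchemy.orm import declarative_base, object_session, sessionmaker

Base = declarative_base()
Session = sessionmaker(bind=create_engine('sqlite://'))


class Resource(Base):
    __tablename__ = 'resources'
    id = Column(Integer, primary_key=True)

    def provide_session(self, session=None):
        # Prefer the caller's session, then the session this instance is
        # already attached to, then a fresh one that adopts the instance.
        if session is None:
            session = object_session(self)
        if session is None:
            session = Session()
            session.add(self)
        return session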
Example #2
    def save(self, session=None):
        from nova.db.sqlalchemy import api

        if session is None:
            session = api.get_session()

        super(NovaBase, self).save(session=session)
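
Callers of this pattern can either let save() obtain its own session or pass one in to group several writes together; an illustrative sketch (the instance names are hypothetical):

# Let save() fetch a session on its own:
instance.save()

# Or share one session across several saves:
session = api.get_session()
instance_a.save(session=session)
instance_b.save(session=session)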
Example #3
def reconcile(context, host_data, dom_factory=dom.DOM_Factory(),
              db_session=None):
    """
    This is the entry method to reconcile the host data from the system with
    that stored in the database.

    :param context: The database context.
    :param host_data: A dictionary of data that represents the latest inventory
                      information on the server.  The data should be in the
                      network DOM format.
    :param dom_factory: Optional factory used to create the DOM objects.  Not
                        required to be set.
    :param db_session: The database session.  Should be started and finalized
                       outside this class.
    """
    if not db_session:
        db_session = session.get_session()

    try:
        with db_session.begin():
            _reconcile_host(context, host_data, dom_factory, db_session)
    except Exception as e:
        _log_before_and_after(context, host_data, db_session)
        msg = ras.vif_get_msg('error', 'HOST_RECONCILE_ERROR')
        ras.function_tracepoint(LOG, __name__, ras.TRACE_ERROR, msg)
        ras.function_tracepoint(
            LOG, __name__, ras.TRACE_EXCEPTION, e.message)
        raise

    # We only want to run the used port clean up rarely, as it is expensive
    global USED_PORT_COUNT
    USED_PORT_COUNT = USED_PORT_COUNT + 1
    if USED_PORT_COUNT >= USED_PORT_THRESHOLD:
        USED_PORT_COUNT = 0
        _neutron_unused_port_cleanup(context)
Example #4
    def _get_ncpu_emc_target_info_list(self):
        target_info_list = []
        # Find the targets used by VM on the compute node
        bdms = db_api.model_query(context.get_admin_context(),
                                  models.BlockDeviceMapping,
                                  session=db_api.get_session())
        bdms = bdms.filter(models.BlockDeviceMapping.connection_info != None)
        bdms = bdms.join(models.BlockDeviceMapping.instance).filter_by(
            host=string.strip(self.host_name))

        for bdm in bdms:
            conn_info = json.loads(bdm.connection_info)

            if 'data' in conn_info:
                if 'target_iqns' in conn_info['data']:
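                    # Note: this branch assumes 'target_luns' accompanies
                    # 'target_iqns'; Example #13 below adds a compatibility
                    # fallback for drivers that report only 'target_lun'.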
                    target_iqns = conn_info['data']['target_iqns']
                    target_luns = conn_info['data']['target_luns']
                elif 'target_iqn' in conn_info['data']:
                    target_iqns = [conn_info['data']['target_iqn']]
                    target_luns = [conn_info['data']['target_lun']]
                else:
                    target_iqns = []
                    target_luns = []
                for target_iqn, target_lun in zip(target_iqns, target_luns):
                    if 'com.emc' in target_iqn:
                        target_info = {
                            'target_iqn': target_iqn,
                            'target_lun': target_lun,
                        }
                        target_info_list.append(target_info)

        return target_info_list
Example #5
    def _get_orphan_pairs(self):
        """
        Uses the DOM topology to get a list of all VLANs/VSwitches which are
        orphaned.

        :return: List of VSwitch/VLAN ID pairs.  The list will look like:
                 [ 'ETHERNET0:12', 'ETHERNET1:43']
        """
        # Read the VIOS adapters from the database
        session = db_session.get_session()
        orphan_pairs = []
        with session.begin():
            vios_list = db_api.vio_server_find_all(context.get_admin_context(),
                                                   CONF.host, session)
            host = dom_model.Host(CONF.host, vios_list)
            for vios in vios_list:
                orphan_veas = vios.get_orphan_virtual_ethernet_adapters()
                for vea in orphan_veas:
                    pair_string = '%(vs)s:%(vlan)d' % {'vs': vea.vswitch_name,
                                                       'vlan': vea.pvid}
                    if not host.is_vlan_vswitch_on_sea(vea.pvid,
                                                       vea.vswitch_name):
                        orphan_pairs.append(pair_string)

                    for addl_vlan in vea.addl_vlan_ids:
                        pair_string = ('%(vs)s:%(vlan)d' %
                                       {'vs': vea.vswitch_name,
                                        'vlan': addl_vlan})
                        if not host.is_vlan_vswitch_on_sea(addl_vlan,
                                                           vea.vswitch_name):
                            orphan_pairs.append(pair_string)

        return orphan_pairs
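
For reference, the documented 'VSWITCH:VLAN' pair strings unpack back into their components as follows (illustrative):

for pair in orphan_pairs:
    vswitch_name, vlan_id = pair.split(':')
    vlan_id = int(vlan_id)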
Example #6
    def getdetail(self, req):
        isql = ("select p.address as address, i.uuid as uuid, "
                "i.project_id as project_id, i.workload_type as workload_type, "
                "i.policy as policy from pci_devices p "
                "left outer join instances i on i.uuid=p.instance_uuid "
                "where p.status='allocated'")
        session = model_api.get_session()
        get_list = session.query("address", "uuid", "project_id",
                                 "workload_type", "policy").\
            from_statement(isql).all()
        if get_list:
            return {'getdetail': get_list}
        else:
            return {'getdetail': {}}
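
On SQLAlchemy 1.4+ the raw SQL above has to be wrapped in text() before execution; a minimal sketch of the equivalent lookup, assuming the same isql statement and session:

from sqlalchemy import text

rows = session.execute(text(isql)).fetchall()
result = ({'getdetail': [dict(row._mapping) for row in rows]}
          if rows else {'getdetail': {}})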
Example #7
def myproject_host_update(context, host_name, values):
    session = get_session()
    with session.begin():
        host_ref = myproject_host_get(context, host_name,
                                      session=session,
                                      check_update=False)
        if host_ref:
            host_ref.update(values)
            host_ref.save(session=session)
        else:
            values['host_name'] = host_name
            myproject_host_create(context, values)
    return host_ref
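
The function above is an update-or-create inside a single transaction, so a concurrent writer cannot slip in between the lookup and the update. A generic sketch of the same idea, assuming a mapped Host model and an open SQLAlchemy session (both illustrative):

def upsert_host(session, host_name, values):
    # One transaction covers the whole read-modify-write.
    with session.begin():
        host = session.query(Host).filter_by(
            host_name=host_name).one_or_none()
        if host:
            for key, value in values.items():
                setattr(host, key, value)
        else:
            host = Host(host_name=host_name, **values)
            session.add(host)
    return host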
Example #9
def get_active_instances(period_length):
    context = RequestContext('1', '1', is_admin=True)
    start, end = get_previous_period(datetime.datetime.utcnow(), period_length)
    session = api.get_session()
    computes = novadb.service_get_all_by_topic(context, 'compute')
    active_instances = []
    for compute in computes:
        query = session.query(novamodels.Instance)

        query = query.filter(api.or_(novamodels.Instance.terminated_at == None,
                                     novamodels.Instance.terminated_at > start))
        query = query.filter_by(host=compute.host)

        for instance in query.all():
            active_instances.append(instance)
    return active_instances
Example #10
def get_active_instances(period_length):
    start, end = get_previous_period(datetime.datetime.utcnow(), period_length)
    session = sqlapi.get_session()
    computes = get_computes()
    active_instances = []
    yesterday = datetime.datetime.utcnow() - datetime.timedelta(days=1)
    for compute in computes:
        if compute.updated_at > yesterday:
            query = session.query(novamodels.Instance)
            active_filter = (novamodels.Instance.terminated_at == None,
                             novamodels.Instance.terminated_at > start)
            query = query.filter(sqlapi.or_(*active_filter))
            query = query.filter_by(host=compute.host)

            for instance in query.all():
                active_instances.append(instance)
    return active_instances
Example #12
def cleanup_network_associations(context, host_name, db_session=None):
    """
    Periodically removes network associations for deleted networks.

    :param context: The context for building the data
    :param host_name: The name of the host
    :param db_session: The database session; a new session is obtained from
                       the factory when not supplied
    """

    LOG.debug('Entry: network associations cleanup task')
    if not db_session:
        db_session = session.get_session()

    nova_net_api = net_root.API()
    neutron_network_ids = set()
    net_assn_neutron_ids = set()
    # retrieve existing neutron networks
    existing_neutron_networks = nova_net_api.get_all(context)
    for neutron_network in existing_neutron_networks:
        neutron_network_ids.add(neutron_network.get("id"))

    with db_session.begin():
        # retrieve unique network ids from existing network associations
        existing_association_networks = (
            db.network_association_find_distinct_networks(
                context, host_name, db_session))

        for network_in_ass in existing_association_networks:
            net_assn_neutron_ids.add(network_in_ass[0])

        deleted_network_ids = net_assn_neutron_ids - neutron_network_ids

        # delete associations of deleted networks
        for networkid in deleted_network_ids:
            LOG.debug('network associations cleanup'
                      ' for networkid ' + str(networkid))

            net_associations_to_delete = (
                db.network_association_find_all_by_network(
                    context, networkid, db_session))
            for net_association in net_associations_to_delete:
                net_association.delete(context, db_session)
    LOG.debug('Exit: network associations cleanup task')
Example #13
    def _get_ncpu_emc_target_info_list(self):
        target_info_list = []
        # Find the targets used by VM on the compute node
        bdms = db_api.model_query(context.get_admin_context(),
                                  models.BlockDeviceMapping,
                                  session=db_api.get_session())
        bdms = bdms.filter(models.BlockDeviceMapping.connection_info != None)
        bdms = bdms.join(models.BlockDeviceMapping.instance).filter_by(
            host=string.strip(self.host_name))

        for bdm in bdms:
            conn_info = json.loads(bdm.connection_info)

            if conn_info is not None and 'data' in conn_info:
                if 'target_iqns' in conn_info['data']:
                    target_iqns = conn_info['data']['target_iqns']
                    # Compatible check for VNX icehouse driver
                    if 'target_luns' in conn_info['data']:
                        target_luns = conn_info['data']['target_luns']
                    else:
                        target_luns = ([conn_info['data']['target_lun']] *
                                       len(target_iqns))
                elif 'target_iqn' in conn_info['data']:
                    target_iqns = [conn_info['data']['target_iqn']]
                    target_luns = [conn_info['data']['target_lun']]
                else:
                    target_iqns = []
                    target_luns = []
                for target_iqn, target_lun in zip(target_iqns, target_luns):
                    if 'com.emc' in target_iqn:
                        target_info = {
                            'target_iqn': target_iqn,
                            'target_lun': target_lun,
                        }
                        target_info_list.append(target_info)

        return target_info_list
Example #14
 def test_get_all_instance_types(self):
     # Ensures that all flavors can be retrieved.
     session = sql_session.get_session()
     total_instance_types = session.query(models.InstanceTypes).count()
     inst_types = flavors.get_all_flavors()
     self.assertEqual(total_instance_types, len(inst_types))
Example #15
def _reconcile_host(context, host_data, dom_factory=dom.DOM_Factory(),
                    db_session=None):
    """
    Performs the actual reconciliation at the host level

    :param context: The database context.
    :param host_data: A dictionary of data that represents the latest inventory
                      information on the server.  The data should be in the
                      network DOM format.
    :param dom_factory: Optional factory used to create the DOM objects.  Not
                        required to be set.
    :param db_session: The database session.  Should be started and finalized
                       outside this class.
    """
    if not db_session:
        db_session = session.get_session()

    # Parse the inventory data into a DOM object.  Use the no_db DOM factory
    # as we want to parse into non-DB backed elements to start...
    non_db_fact = dom.No_DB_DOM_Factory()
    server_dom = dom.parse_to_host(host_data, non_db_fact)

    msg = (ras.vif_get_msg('info', 'RECONCILE_HOST_START') %
           {'host': server_dom.host_name})
    ras.trace(LOG, __name__, ras.TRACE_DEBUG, msg)

    # Get the inventory data from the database.
    db_vio_servers = db.vio_server_find_all(context, server_dom.host_name,
                                            db_session)

    # If there are no VIO Servers, the system may be powered off.  It is very
    # unlikely that they have actually been removed (all of them, at least).
    # Therefore, we flip the SEAs in each VioServer to an unavailable state so
    # that they do not show up in the UI...but they are not deleted.
    if len(server_dom.vio_servers) == 0:
        LOG.info(_("Flipping host %s to unavailable due to lack of VioServers")
                 % server_dom.host_name)
        _make_system_unavailable(db_vio_servers, context, db_session)
        return

    # The first step is to find which VIO Servers to add, remove, or modify.
    # Those are the three passes that need to be made.
    #
    # We start with the idea that all of the database items should be removed.
    # From there, we parse down which are still on the system (and therefore
    # need to be modified) and then handle the new adds.
    db_vios_to_del = dom.shallow_copy_as_ordinary_list(db_vio_servers)
    srv_vios_to_add = []
    srv_vios_to_modify = []

    for vio_server in server_dom.vio_servers:
        db_vios = _find_vios(db_vio_servers, vio_server.lpar_id)
        if db_vios:
            srv_vios_to_modify.append(vio_server)
            db_vios_to_del.remove(db_vios)
        else:
            srv_vios_to_add.append(vio_server)

    # Now that we know what to modify/create/delete...loop through each and
    # execute the commands to reconcile
    db_host_dom = dom.Host(server_dom.host_name, db_vio_servers)

    # Save off the network associations first so we can recreate any that
    # need to be later on.
    net_assns = _build_net_assn_dict(
        db.network_association_find_all(context,
                                        db_host_dom.host_name,
                                        db_session))

    for db_vios in db_vios_to_del:
        _remove_vios(db_vios, db_host_dom, context, db_session)
    for server_vios in srv_vios_to_modify:
        _reconcile_vios(_find_vios(db_vio_servers, server_vios.lpar_id),
                        server_vios, context, db_session, dom_factory)
    for server_vios in srv_vios_to_add:
        _add_vios(server_vios, db_host_dom, context, db_session, dom_factory)

    msg = (ras.vif_get_msg('info', 'RECONCILE_HOST_END') %
           {'host': server_dom.host_name})
    ras.trace(LOG, __name__, ras.TRACE_DEBUG, msg)

    # Cleanup NetworkAssociations in case any VIOSes went away or came back.
    _cleanup_network_associations(db_host_dom, net_assns, context, db_session)
Example #16
    def get_host_seas(self, context, host_name=None, vswitch=None, vlan=None,
                      net_id=None, session=None):
        """
        This method will return a dictionary of data that represents the
        Shared Ethernet Adapter information for a given host or a set of hosts.
        If vlan or net_id are passed in, then only SEAs that are valid for the
        given vlan or net_id will be returned.

        :param context: The context for the request.
        :param host_name: The identifying name of the host to request the data
                          for.  If None is passed in, a dictionary representing
                          all of the managed hosts will be returned.
        :param vswitch: The vswitch that should be used to help identify the
                        default adapter.  If set to None (the default value),
                        only the VLAN ID will be used; i.e., all vswitches
                        will be candidates.
        :param vlan: The vlan that should be used to help identify the default
                     and candidate adapters.  If set to None (the default
                     value), a VLAN ID of 1 will be used.  This parameter will
                     be ignored if net_id is passed in.
        :param net_id: The network UUID for an existing neutron network.  If
                       this is passed in, then vlan will be ignored and the
                       vlan to use will be obtained from the neutron network.
        :param session: session to be used for db access

        :return: A dictionary of host level Shared Ethernet Adapter data.  Ex:
        {
         "host-seas": [
            {
             "host_name": "host1",
             "adapters": [
                {
                 "default": false,
                 "sea_name": "ent11",
                 "vswitch": "ETHERNET0",
                 "lpar_id": 1,
                 "ha_lpar_id": 2,
                 "ha_mode": "enabled",
                 "pvid": 1,
                 "state": "Available",
                 "ha_state": "Available",
                 "lpar_name": "10-23C2P",
                 "ha_lpar_name": "10-24C2P",
                 "ha_sea": "ent21"
                },
                {
                 "default": false,
                 "sea_name": "ent12",
                 "vswitch": "ETHERNET0",
                 "lpar_id": 1,
                 "ha_lpar_id": 2,
                 "ha_mode": "enabled",
                 "pvid": 2,
                 "state": "Available",
                 "ha_state": "Available",
                 "lpar_name": "10-23C2P",
                 "ha_lpar_name": "10-24C2P",
                 "ha_sea": "ent22"
                }
             ]
            },
            {
             "host_name": "host2",
             "adapters": [
                {
                 "default": true,
                 "sea_name": "ent5",
                 "vswitch": "ETHERNET0",
                 "lpar_id": 1,
                 "ha_lpar_id": null,
                 "ha_mode": "disabled",
                 "pvid": 1,
                 "state": "Available",
                 "ha_state": null,
                 "lpar_name": "15-34B9Z",
                 "ha_lpar_name": null,
                 "ha_sea": null
                }
             ]
            }
         ]
        }
        """
        ras.function_tracepoint(LOG, __name__, ras.TRACE_INFO,
                                ras.vif_get_msg('info', 'GET_HOST_SEAS') %
                                {'host': host_name, 'vlan': vlan,
                                 'vswitch': vswitch, 'net_id': net_id})

        # This class should only be used in PowerVM environments
        self.raise_if_not_powervm()

        if session is None:
            session = db_session.get_session()

        hosts = self._find_all_host_names(context)
        if host_name:
            if host_name in hosts:
                # Narrow the list down to just the requested host for the
                # loop below.
                hosts = [host_name]

            else:
                msg = (ras.vif_get_msg('info', 'HOST_NOT_FOUND') %
                       {'hostid': host_name})
                ras.function_tracepoint(
                    LOG, __name__, ras.TRACE_INFO, msg)
                raise exception.ComputeHostNotFound(host=host_name)

        # Validate that network exists
        if net_id or net_id == '':
            try:
                network.API().get(context, net_id)
            except Exception:
                raise exception.InvalidID(id=net_id)

        host_dict_list = []

        with session.begin():

            ports = None
            if net_id:
                # Performance optimization -- read network ports before loop
                # because this operation is expensive
                search_opts = {'network_id': net_id}
                network_data = network.API().list_ports(context, **search_opts)
                ports = network_data.get('ports', [])

            for host in hosts:
                resp = self._get_specific_host_seas(context, host, vswitch,
                                                    vlan, net_id, session,
                                                    ports)
                host_dict_list.append(resp)

        return {
            'host-seas': host_dict_list
        }
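
An illustrative call, assuming a sea_api instance of the class above and a request context ctx:

result = sea_api.get_host_seas(ctx, host_name='host1', vlan=100)
for host_entry in result['host-seas']:
    default_seas = [a for a in host_entry['adapters'] if a['default']]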
Example #18
from nova.db.sqlalchemy import api as model_api
from nova.db.sqlalchemy.models import PciDevice, Instance, ComputeNode
import collections
#, VFAllocation

session = model_api.get_session()
WORK_LOAD = ["cp", "cr"]


def execute_vf_allocation(req_vf, los, req_work, bus_list, *args, **kwargs):
    """This method is called from nova.scheduler.filter_scheduler.FilterScheduler"""
    base_dict = collections.OrderedDict()
    get_bus_slot = session.query(PciDevice).from_statement(
        "select id,bus,slot from pci_devices where status = :status "
        "GROUP BY bus, slot").params(status="available").all()
    obj_list = [obj for obj in get_bus_slot if obj.bus in bus_list]
    if not obj_list:
        return []
    
    """ CLEAR VF_ALLOCATION  TABLE DATA """
    session.execute("truncate vf_allocation")

    """ Get list of PCI devices for Unique bus and slot (unassigned is optional) """
    for obj in obj_list:
        BUS = obj.bus
        SLOT = obj.slot
        cp_vf_assigned = []
        for j in range(len(WORK_LOAD)):

            """ Get the List of VF assigned for each Bus, Slot for workload cp and cr """
            GET_ASS_VF = """select bus,slot,function,count(workload) as count_wl from pci_devices where bus = %s and slot = %s and workload = '%s' and status = 'allocated'""" % (BUS, SLOT, str(WORK_LOAD[j]))

	    cp_vf_ass = int(session.query("count_wl").from_statement(GET_ASS_VF).scalar())
Example #19
 def __init__(self, weighed_hosts, pci_requests):
     self.db_session = model_api.get_session()
     self.weighed_hosts = weighed_hosts
     self.pci_requests = pci_requests
Example #20
def instance_get_active_by_window_joined(
    context,
    begin, end=None,
    project_id=None,
    host=None,
    use_slave=False,
    columns_to_join=None,
    metadata=None
):
    """Simulate bottom most layer.

    :param context: wsgi context
    :param begin: Datetime
    :param end: Datetime|None
    :param project_id: String|None
    :param host: String|None
    :param use_slave: Boolean
    :param columns_to_join: List|None
    :param metadata: Dict|None
    """
    if metadata:
        aliases = [aliased(models.InstanceMetadata) for i in metadata]
    else:
        aliases = []
    session = get_session(use_slave=use_slave)
    query = session.query(
        models.Instance,
        models.InstanceTypes,
        *aliases
    )

    if columns_to_join is None:
        columns_to_join_new = ['info_cache', 'security_groups']
        manual_joins = ['metadata', 'system_metadata']
    else:
        manual_joins, columns_to_join_new = (
            _manual_join_columns(columns_to_join))

    for column in columns_to_join_new:
        if 'extra.' in column:
            query = query.options(undefer(column))
        else:
            query = query.options(joinedload(column))

    query = query.filter(or_(models.Instance.terminated_at == null(),
                             models.Instance.terminated_at > begin))
    if end:
        query = query.filter(models.Instance.launched_at < end)
    if project_id:
        query = query.filter_by(project_id=project_id)
    if host:
        query = query.filter_by(host=host)

    if metadata:
        for keypair, alias in zip(metadata.items(), aliases):
            query = query.filter(alias.key == keypair[0])
            query = query.filter(alias.value == keypair[1])
            query = query.filter(alias.instance_uuid == models.Instance.uuid)
            query = query.filter(or_(
                alias.deleted_at == null(),
                alias.deleted_at == models.Instance.deleted_at
            ))

    query = query.filter(
        models.Instance.instance_type_id == models.InstanceTypes.id
    )

    flavors = []
    instances = []
    for tup in query.all():
        # Query results are in tuple form (Instance, Flavor, Meta 1, Meta 2..)
        instance = tup[0]
        instances.append(dict(instance))
        flavor = tup[1]
        flavors.append(dict(flavor))

    return (instances, flavors)
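
An illustrative call, assuming a request context ctx and a 24-hour window:

begin = datetime.datetime.utcnow() - datetime.timedelta(days=1)
instances, flavors = instance_get_active_by_window_joined(
    ctx, begin, host='compute-01', metadata={'role': 'web'})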
Example #21
 def __init__(self):
     self.db_session = model_api.get_session()