def index(self, req):
    """Return the detailed server list, with extended attributes attached.

    Each entry in the returned ``servers`` list gains an ``attrs`` key built
    from the matching ``models.Instance`` row (looked up by server uuid).

    :param req: the WSGI request object
    :returns: the servers dict produced by the underlying Servers API,
              augmented in place with per-server ``attrs``
    """
    # This has been revised so that it is less coupled with
    # the implementation of the Servers API, which is in flux
    # KLUDGE to make this extension working with different engine branches
    # in a soon future there will be only the '_items' code path.
    if hasattr(self, '_items'):
        servers = self._items(req, is_detail=True)
    else:
        servers = self._get_servers(req, is_detail=True)
    # The 'id' attribute here is actually the uuid of the server
    ids = [server['id'] for server in servers['servers']]
    # FIX: dropped the unused `context = req.environ['engine.context']`
    # local -- it was never read.
    session = get_session()
    instance_map = {}
    for instance in session.query(models.Instance).filter(
            models.Instance.uuid.in_(ids)).all():
        instance_map[instance.uuid] = instance
    for s in servers['servers']:
        s['attrs'] = self._build_extended_attributes(instance_map[s['id']])
    return servers
def test_get_all_volume_types(self):
    """Ensures that all volume types can be retrieved"""
    # Count what the database holds, then compare against the API view.
    db_total = get_session().query(models.VolumeTypes).count()
    fetched_types = volume_types.get_all_types(self.ctxt)
    self.assertEqual(db_total, len(fetched_types))
def save(self, session=None):
    """Save this object.

    :param session: optional SQLAlchemy session; a new one is obtained
                    from get_session() when not supplied
    :raises exception.Duplicate: when flushing hits a unique-constraint
                                 violation
    """
    if not session:
        session = get_session()
    session.add(self)
    try:
        session.flush()
    # FIX: replaced the Python-2-only `except IntegrityError, e` comma
    # syntax with `as`, which is valid on Python 2.6+ and Python 3.
    except IntegrityError as e:
        # 'is not unique' is the wording SQLite uses for unique-constraint
        # failures; translate it into the project's Duplicate exception.
        if str(e).endswith('is not unique'):
            raise exception.Duplicate(str(e))
        else:
            raise
def service_get_all_compute(context):
    """Return all non-deleted services on the 'compute' topic.

    Each service row is returned with its 'compute_node' relation
    eagerly loaded.

    :param context: request context (unused by the query itself)
    :raises exception.ComputeHostNotFound: when no compute service exists
    """
    topic = 'compute'
    session = get_session()
    result = session.query(models.Service).\
        options(joinedload('compute_node')).\
        filter_by(deleted=False).\
        filter_by(topic=topic).\
        all()
    if not result:
        # BUG FIX: this raise referenced an undefined name `host`, so an
        # empty result produced a NameError instead of the intended
        # exception; report the topic we searched for instead.
        raise exception.ComputeHostNotFound(host=topic)
    return result
def service_get_all_compute(context):
    """Return all non-deleted services on the 'compute' topic.

    Each service row is returned with its 'compute_node' relation
    eagerly loaded.

    :param context: request context (unused by the query itself)
    :raises exception.ComputeHostNotFound: when no compute service exists
    """
    topic = "compute"
    session = get_session()
    result = (
        session.query(models.Service)
        .options(joinedload("compute_node"))
        .filter_by(deleted=False)
        .filter_by(topic=topic)
        .all()
    )
    if not result:
        # BUG FIX: this raise referenced an undefined name `host`, so an
        # empty result produced a NameError instead of the intended
        # exception; report the topic we searched for instead.
        raise exception.ComputeHostNotFound(host=topic)
    return result
def setUp(self):
    """Build a QuantumManager against a fake client and scrub leftovers."""
    super(QuantumEngineTestCase, self).setUp()
    self.net_man = quantum_manager.QuantumManager(
        ipam_lib="engine.network.quantum.engine_ipam_lib",
        q_conn=FakeQuantumClientConnection())

    # Tests seem to create some networks by default, which
    # we don't want.  So we delete them.
    ctx = context.RequestContext('user1', 'fake_project1').elevated()
    for network in db.network_get_all(ctx):
        db.network_delete_safe(ctx, network['id'])

    # Other unit tests (e.g., test_compute.py) have a nasty
    # habit of creating fixed IPs and not cleaning up, which
    # can confuse these tests, so we remove all existing fixed
    # ips before starting.
    session = get_session()
    stale_fixed_ips = session.query(models.FixedIp).all()
    with session.begin():
        for stale_ip in stale_fixed_ips:
            session.delete(stale_ip)
def index(self, req):
    """Return the detailed server list, with extended attributes attached.

    Each entry in the returned ``servers`` list gains an ``attrs`` key built
    from the matching ``models.Instance`` row (looked up by server uuid).

    :param req: the WSGI request object
    :returns: the servers dict produced by the underlying Servers API,
              augmented in place with per-server ``attrs``
    """
    # This has been revised so that it is less coupled with
    # the implementation of the Servers API, which is in flux
    # KLUDGE to make this extension working with different engine branches
    # in a soon future there will be only the '_items' code path.
    if hasattr(self, "_items"):
        servers = self._items(req, is_detail=True)
    else:
        servers = self._get_servers(req, is_detail=True)
    # The 'id' attribute here is actually the uuid of the server
    ids = [server["id"] for server in servers["servers"]]
    # FIX: dropped the unused `context = req.environ["engine.context"]`
    # local -- it was never read.
    session = get_session()
    instance_map = {}
    for instance in session.query(models.Instance).filter(
            models.Instance.uuid.in_(ids)).all():
        instance_map[instance.uuid] = instance
    for s in servers["servers"]:
        s["attrs"] = self._build_extended_attributes(instance_map[s["id"]])
    return servers
def setUp(self):
    """Per-test fixture setup for InstanceTypeTestCase."""
    super(InstanceTypeTestCase, self).setUp()
    # NOTE(review): `session` is assigned but never used in this visible
    # snippet -- presumably get_session() is kept for its side effect of
    # initializing the DB session machinery; confirm whether this line
    # can be dropped.
    session = get_session()
def test_get_all_instance_types(self):
    """Ensures that all instance types can be retrieved"""
    # Count what the database holds, then compare against the API view.
    db_total = get_session().query(models.InstanceTypes).count()
    fetched_types = instance_types.get_all_types()
    self.assertEqual(db_total, len(fetched_types))
def _usage_for_period(self, context, period_start, period_stop, tenant_id=None):
    """Aggregate per-tenant instance usage between two datetimes.

    Queries the ``instances`` table for rows alive during the period,
    prices each one via its instance type (flavor), and rolls the results
    up per tenant.

    :param context: request context passed through to the flavor lookups
    :param period_start: datetime, inclusive start of the billing window
    :param period_stop: datetime, end of the billing window
    :param tenant_id: optional tenant filter; when None, all tenants
    :returns: iterable of per-tenant summary dicts, each containing the
              individual instance records plus usage totals
    """
    fields = [
        "id",
        "image_ref",
        "project_id",
        "user_id",
        "vcpus",
        "hostname",
        "display_name",
        "host",
        "task_state",
        "instance_type_id",
        "launched_at",
        "terminated_at",
    ]
    tenant_clause = ""
    if tenant_id:
        # SECURITY NOTE(review): tenant_id is interpolated directly into
        # the SQL below; if it can ever carry user-controlled text this is
        # an injection vector and should use bound parameters instead.
        tenant_clause = " and project_id='%s'" % tenant_id
    connection = get_session().connection()
    rows = connection.execute(
        "select %s from instances where \
        (terminated_at is NULL or terminated_at > '%s') \
        and (launched_at < '%s') %s"
        % (",".join(fields), period_start.isoformat(" "),
           period_stop.isoformat(" "), tenant_clause)
    ).fetchall()
    rval = {}
    flavors = {}
    for row in rows:
        o = {}
        for i in range(len(fields)):
            o[fields[i]] = row[i]
        o["hours"] = self._hours_for(o, period_start, period_stop)
        flavor_type = o["instance_type_id"]
        try:
            flavors[flavor_type] = db.instance_type_get_by_id(context, flavor_type)
        except AttributeError:
            # The most recent version of engine renamed this function
            flavors[flavor_type] = db.instance_type_get(context, flavor_type)
        except exception.InstanceTypeNotFound:
            # can't bill if there is no instance type
            continue
        flavor = flavors[flavor_type]
        # Rename raw DB columns into the report's vocabulary.
        o["name"] = o["display_name"]
        del o["display_name"]
        o["ram_size"] = flavor["memory_mb"]
        o["disk_size"] = flavor["local_gb"]
        o["tenant_id"] = o["project_id"]
        del o["project_id"]
        o["flavor"] = flavor["name"]
        del o["instance_type_id"]
        o["started_at"] = o["launched_at"]
        del o["launched_at"]
        o["ended_at"] = o["terminated_at"]
        del o["terminated_at"]
        if o["ended_at"]:
            o["state"] = "terminated"
        else:
            o["state"] = o["task_state"]
        del o["task_state"]
        now = datetime.utcnow()
        if o["state"] == "terminated":
            delta = self._parse_datetime(o["ended_at"]) - self._parse_datetime(o["started_at"])
        else:
            delta = now - self._parse_datetime(o["started_at"])
        # BUG FIX: was `delta.days * 24 * 60 + delta.seconds`, which mixed
        # day-minutes with seconds; uptime is reported in seconds.
        o["uptime"] = delta.days * 24 * 60 * 60 + delta.seconds
        if not o["tenant_id"] in rval:
            summary = {}
            summary["tenant_id"] = o["tenant_id"]
            summary["instances"] = []
            summary["total_disk_usage"] = 0
            summary["total_cpu_usage"] = 0
            summary["total_ram_usage"] = 0
            summary["total_active_ram_size"] = 0
            summary["total_active_disk_size"] = 0
            summary["total_active_vcpus"] = 0
            summary["total_active_instances"] = 0
            summary["total_hours"] = 0
            summary["begin"] = period_start
            summary["stop"] = period_stop
            rval[o["tenant_id"]] = summary
        rval[o["tenant_id"]]["total_disk_usage"] += o["disk_size"] * o["hours"]
        rval[o["tenant_id"]]["total_cpu_usage"] += o["vcpus"] * o["hours"]
        rval[o["tenant_id"]]["total_ram_usage"] += o["ram_size"] * o["hours"]
        # BUG FIX: was `o["state"] is not "terminated"` -- an identity
        # comparison against a string literal that only worked by CPython
        # interning accident; use a value comparison.
        if o["state"] != "terminated":
            rval[o["tenant_id"]]["total_active_ram_size"] += o["ram_size"]
            rval[o["tenant_id"]]["total_active_vcpus"] += o["vcpus"]
            rval[o["tenant_id"]]["total_active_disk_size"] += o["disk_size"]
            rval[o["tenant_id"]]["total_active_instances"] += 1
        rval[o["tenant_id"]]["total_hours"] += o["hours"]
        rval[o["tenant_id"]]["instances"].append(o)
    return rval.values()
def _usage_for_period(self, context, period_start, period_stop, tenant_id=None):
    """Aggregate per-tenant instance usage between two datetimes.

    Queries the ``instances`` table for rows alive during the period,
    prices each one via its instance type (flavor), and rolls the results
    up per tenant.

    :param context: request context passed through to the flavor lookups
    :param period_start: datetime, inclusive start of the billing window
    :param period_stop: datetime, end of the billing window
    :param tenant_id: optional tenant filter; when None, all tenants
    :returns: iterable of per-tenant summary dicts, each containing the
              individual instance records plus usage totals
    """
    fields = ['id',
              'image_ref',
              'project_id',
              'user_id',
              'vcpus',
              'hostname',
              'display_name',
              'host',
              'task_state',
              'instance_type_id',
              'launched_at',
              'terminated_at']

    tenant_clause = ''
    if tenant_id:
        # SECURITY NOTE(review): tenant_id is interpolated directly into
        # the SQL below; if it can ever carry user-controlled text this is
        # an injection vector and should use bound parameters instead.
        tenant_clause = " and project_id='%s'" % tenant_id

    connection = get_session().connection()
    rows = connection.execute("select %s from instances where \
                               (terminated_at is NULL or terminated_at > '%s') \
                               and (launched_at < '%s') %s" %
                              (','.join(fields), period_start.isoformat(' '),
                               period_stop.isoformat(' '),
                               tenant_clause)).fetchall()

    rval = {}
    flavors = {}

    for row in rows:
        o = {}
        for i in range(len(fields)):
            o[fields[i]] = row[i]
        o['hours'] = self._hours_for(o, period_start, period_stop)
        flavor_type = o['instance_type_id']

        try:
            flavors[flavor_type] = \
                db.instance_type_get_by_id(context, flavor_type)
        except AttributeError:
            # The most recent version of engine renamed this function
            flavors[flavor_type] = \
                db.instance_type_get(context, flavor_type)
        except exception.InstanceTypeNotFound:
            # can't bill if there is no instance type
            continue

        flavor = flavors[flavor_type]

        # Rename raw DB columns into the report's vocabulary.
        o['name'] = o['display_name']
        del o['display_name']

        o['ram_size'] = flavor['memory_mb']
        o['disk_size'] = flavor['local_gb']

        o['tenant_id'] = o['project_id']
        del o['project_id']

        o['flavor'] = flavor['name']
        del o['instance_type_id']

        o['started_at'] = o['launched_at']
        del o['launched_at']

        o['ended_at'] = o['terminated_at']
        del o['terminated_at']

        if o['ended_at']:
            o['state'] = 'terminated'
        else:
            o['state'] = o['task_state']
        del o['task_state']

        now = datetime.utcnow()

        if o['state'] == 'terminated':
            delta = self._parse_datetime(o['ended_at'])\
                - self._parse_datetime(o['started_at'])
        else:
            delta = now - self._parse_datetime(o['started_at'])

        # BUG FIX: was `delta.days * 24 * 60 + delta.seconds`, which mixed
        # day-minutes with seconds; uptime is reported in seconds.
        o['uptime'] = delta.days * 24 * 60 * 60 + delta.seconds

        if not o['tenant_id'] in rval:
            summary = {}
            summary['tenant_id'] = o['tenant_id']
            summary['instances'] = []
            summary['total_disk_usage'] = 0
            summary['total_cpu_usage'] = 0
            summary['total_ram_usage'] = 0
            summary['total_active_ram_size'] = 0
            summary['total_active_disk_size'] = 0
            summary['total_active_vcpus'] = 0
            summary['total_active_instances'] = 0
            summary['total_hours'] = 0
            summary['begin'] = period_start
            summary['stop'] = period_stop
            rval[o['tenant_id']] = summary

        rval[o['tenant_id']][
            'total_disk_usage'] += o['disk_size'] * o['hours']
        rval[o['tenant_id']]['total_cpu_usage'] += o['vcpus'] * o['hours']
        rval[o['tenant_id']][
            'total_ram_usage'] += o['ram_size'] * o['hours']
        # BUG FIX: was `o['state'] is not 'terminated'` -- an identity
        # comparison against a string literal that only worked by CPython
        # interning accident; use a value comparison.
        if o['state'] != 'terminated':
            rval[o['tenant_id']]['total_active_ram_size'] += o['ram_size']
            rval[o['tenant_id']]['total_active_vcpus'] += o['vcpus']
            rval[
                o['tenant_id']]['total_active_disk_size'] += o['disk_size']
            rval[o['tenant_id']]['total_active_instances'] += 1
        rval[o['tenant_id']]['total_hours'] += o['hours']
        rval[o['tenant_id']]['instances'].append(o)

    return rval.values()