def purchase_update(context, values):
    """Upsert the purchase row for a subscription.

    If no purchase exists for ``values['subscription_id']`` a new one is
    created; otherwise the existing row's line_total is updated in place.
    """
    subscription_id = values['subscription_id']
    existing = model_query(context, models.Purchase).filter_by(
        subscription_id=subscription_id).first()
    if not existing:
        purchase_create(context, values)
    else:
        # Bulk UPDATE on the matching row rather than mutating the ORM object.
        model_query(context, models.Purchase).filter_by(
            subscription_id=subscription_id).update(
            {'line_total': values['line_total']})
def delete(self, req, id):
    """Soft-delete the workload with the given id in the caller's project.

    :param req: WSGI request; the nova context is read from its environ.
    :param id: workload id (string from the URL, coerced to int).
    :returns: ``{"status": "SUCCESS"}`` when a matching workload was
        soft-deleted, ``{"status": "FAILURE"}`` when none was found.
    """
    context = req.environ['nova.context']
    authorize(context)
    workload = model_query(context, Workload).\
        filter_by(project_id=context.project_id).\
        filter_by(id=int(id)).first()
    if workload:
        # Re-query so soft_delete() runs as a bulk UPDATE; the previous
        # code bound its result to an unused local, now removed.
        model_query(context, Workload).\
            filter_by(project_id=context.project_id).\
            filter_by(id=int(id)).\
            soft_delete()
        return {"status": "SUCCESS"}
    else:
        return {"status": "FAILURE"}
def _delete_dependents_for_all_parents(parent_list, dep_type_list, kwargs):
    """Delete all dependents of each parent, returning the # deleted."""
    # Guard clause: nothing to do when either list is empty.
    if not parent_list or not dep_type_list:
        return 0

    # Fall back to an admin context when the caller supplied none.
    context = kwargs.get('context')
    if context is None:
        context = nova_context.get_admin_context()

    deleted_count = 0
    # For each dependent-type and each parent, delete all the dependents.
    for dep_type in dep_type_list:
        query = nova_db_sa_api.model_query(context, dep_type.dep_class,
                                           **kwargs)
        for parent in parent_list:
            # Build a fresh filter dict per parent instead of mutating a
            # shared one and popping the key afterwards.
            filters = {
                'deleted': 0,
                dep_type.fkey_attr: getattr(parent, dep_type.pkey_attr),
            }
            target = query.filter_by(**filters)
            if dep_type.is_deletion_soft:
                deleted_count += target.soft_delete()
            else:
                deleted_count += target.delete()
    return deleted_count
def subscription_get_all(context, filters=None):
    """List subscriptions, optionally filtered.

    Only the 'project_id', 'product_id' and 'resource_uuid' keys of
    *filters* are honored; any other keys are silently dropped.
    """
    filters = filters or dict()
    # Generator expression instead of filter() with a tuple-unpacking
    # lambda: ``lambda (x, y): ...`` is a SyntaxError on Python 3.
    allowed = ('project_id', 'product_id', 'resource_uuid')
    filters = dict((k, v) for k, v in filters.items() if k in allowed)
    return model_query(context, models.Subscription).filter_by(
        **filters).all()
def _get_ncpu_emc_target_info_list(self):
    """Collect EMC iSCSI target info for volumes attached on this host.

    :returns: list of ``{'target_iqn': ..., 'target_lun': ...}`` dicts
        for every EMC ('com.emc') target referenced by a block device
        mapping of an instance on this compute host.
    """
    target_info_list = []
    # Find the targets used by VM on the compute node
    bdms = db_api.model_query(context.get_admin_context(),
                              models.BlockDeviceMapping,
                              session=db_api.get_session())
    bdms = bdms.filter(models.BlockDeviceMapping.connection_info != None)
    # str.strip() instead of string.strip(): the string-module function
    # was removed in Python 3.
    bdms = bdms.join(models.BlockDeviceMapping.instance).filter_by(
        host=self.host_name.strip())
    for bdm in bdms:
        conn_info = json.loads(bdm.connection_info)
        # The column can hold the JSON literal 'null'; guard before
        # treating conn_info as a dict (matches the newer sibling
        # implementation of this method).
        if conn_info is not None and 'data' in conn_info:
            if 'target_iqns' in conn_info['data']:
                target_iqns = conn_info['data']['target_iqns']
                # Compatibility with drivers that report a single
                # target_lun alongside multiple target_iqns.
                if 'target_luns' in conn_info['data']:
                    target_luns = conn_info['data']['target_luns']
                else:
                    target_luns = ([conn_info['data']['target_lun']] *
                                   len(target_iqns))
            elif 'target_iqn' in conn_info['data']:
                target_iqns = [conn_info['data']['target_iqn']]
                target_luns = [conn_info['data']['target_lun']]
            else:
                target_iqns = []
                target_luns = []
            for target_iqn, target_lun in zip(target_iqns, target_luns):
                if 'com.emc' in target_iqn:
                    target_info = {
                        'target_iqn': target_iqn,
                        'target_lun': target_lun,
                    }
                    target_info_list.append(target_info)
    return target_info_list
def purchase_get(context, purchase_id):
    """Fetch a purchase by primary key.

    :raises: exception.PurchaseNotFound when no row matches.
    """
    purchase = model_query(context, models.Purchase).filter_by(
        id=purchase_id).first()
    if purchase is None:
        raise exception.PurchaseNotFound(purchase_id=purchase_id)
    return purchase
def myproject_host_get(context, host_name, session=None, check_update=True):
    """Look up a MyProjectHost by host name.

    When check_update is true, hosts considered 'down' are filtered out.
    Returns the first match or None.
    """
    query = model_query(context, models.MyProjectHost,
                        session=session).filter_by(host_name=host_name)
    return (_filter_down_hosts(query) if check_update else query).first()
def region_get_by_name(context, region_name):
    """Fetch a region by its unique name.

    :raises: exception.RegionNotFoundByName when no row matches.
    """
    region = model_query(context, models.Region).filter_by(
        name=region_name).first()
    if region is None:
        raise exception.RegionNotFoundByName(region_name=region_name)
    return region
def region_get(context, region_id):
    """Fetch a region by primary key.

    :raises: exception.RegionNotFound when no row matches.
    """
    region = model_query(context, models.Region).filter_by(
        id=region_id).first()
    if region is None:
        raise exception.RegionNotFound(region_id=region_id)
    return region
def index(self, req):
    """Return a list of all workloads.

    For each workload of the caller's project, counts the non-deleted,
    non-soft-deleted instances whose display_name starts with the
    workload name plus a dash, and sums their memory.
    """
    context = req.environ['nova.context']
    authorize(context)
    workloads = []
    builds = self.workloads_get_all(context)
    for workload in builds:
        LOG.debug("Inspecting workload %s", workload.name)
        # Aggregate instance count and total memory in one query.
        # NOTE(review): the second LIKE pattern contains a literal
        # backslash+space ("\ -%") — presumably to match an escaped
        # variant of the name; confirm against the data.
        result = model_query(context, Instance, (
            func.count(Instance.id),
            func.sum(Instance.memory_mb))).\
            filter(or_(Instance.display_name.like(workload.name+"-%"),
                       Instance.display_name.like(workload.name+"\ -%"))).\
            filter(and_(
                # Nova convention: a deleted row has deleted == id.
                Instance.deleted != Instance.id,
                Instance.vm_state != vm_states.SOFT_DELETED
            )).\
            filter_by(project_id=context.project_id).first()
        instances = result[0]
        # SUM() is NULL when no rows match; coerce to 0.
        memory_mb = result[1] or 0
        workloads.append({
            'id': workload.id,
            'name': workload.name,
            'priority': workload.priority,
            'instances': instances,
            'memory_mb': int(memory_mb)
        })
    #LOG.debug("Got result %s", str(workloads))
    return {'workloads': workloads}
def subscription_get_byname(context, resource_name):
    """Fetch a subscription by resource name, or None when absent."""
    # Unlike the other getters this one does not raise on a miss.
    return model_query(context, models.Subscription).filter_by(
        resource_name=resource_name).first()
def item_get(context, item_id):
    """Fetch an item by primary key.

    :raises: exception.ItemNotFound when no row matches.
    """
    item = model_query(context, models.Item).filter_by(id=item_id).first()
    if item is None:
        raise exception.ItemNotFound(item_id=item_id)
    return item
def product_get(context, product_id):
    """Fetch a product by primary key.

    :raises: exception.ProductNotFound when no row matches.
    """
    product = model_query(context, models.Product).filter_by(
        id=product_id).first()
    if product is None:
        raise exception.ProductNotFound(product_id=product_id)
    return product
def subscription_get(context, subscription_id):
    """Fetch a subscription by primary key.

    :raises: exception.SubscriptionNotFound when no row matches.
    """
    subscription = model_query(context, models.Subscription).filter_by(
        id=subscription_id).first()
    if subscription is None:
        raise exception.SubscriptionNotFound(
            subscription_id=subscription_id)
    return subscription
def payment_type_get(context, payment_type_id):
    """Fetch a payment type by primary key.

    :raises: exception.PaymentTypeNotFound when no row matches.
    """
    payment_type = model_query(context, models.PaymentType).filter_by(
        id=payment_type_id).first()
    if payment_type is None:
        raise exception.PaymentTypeNotFound(
            payment_type_id=payment_type_id)
    return payment_type
def item_type_get_by_name(context, item_type_name):
    """Fetch an item type by its unique name.

    :raises: exception.ItemTypeNotFoundByName when no row matches.
    """
    item_type = model_query(context, models.ItemType).filter_by(
        name=item_type_name).first()
    if item_type is None:
        raise exception.ItemTypeNotFoundByName(
            item_type_name=item_type_name)
    return item_type
def product_get_all(context, filters=None):
    """List products, optionally filtered.

    Only the 'region_id', 'item_id', 'item_type_id' and 'payment_type_id'
    keys of *filters* are honored; any other keys are silently dropped.
    """
    filters = filters or dict()
    # Generator expression instead of filter() with a tuple-unpacking
    # lambda: ``lambda (x, y): ...`` is a SyntaxError on Python 3.
    allowed = ('region_id', 'item_id', 'item_type_id', 'payment_type_id')
    filters = dict((k, v) for k, v in filters.items() if k in allowed)
    return model_query(context, models.Product).filter_by(**filters).all()
def payment_type_get_by_name(context, payment_type_name):
    """Fetch a payment type by its unique name.

    :raises: exception.PaymentTypeNotFoundByName when no row matches.
    """
    payment_type = model_query(context, models.PaymentType).filter_by(
        name=payment_type_name).first()
    if payment_type is None:
        raise exception.PaymentTypeNotFoundByName(
            payment_type_name=payment_type_name)
    return payment_type
def purchase_get_by_subscription_recent(context, subscription_id):
    """Fetch the most recently created purchase for a subscription.

    :raises: exception.PurchaseNotFoundBySubscription when the
        subscription has no purchases.
    """
    # Newest first, then take the head of the result.
    purchase = model_query(context, models.Purchase).filter_by(
        subscription_id=subscription_id).order_by(
        desc(models.Purchase.created_at)).first()
    if purchase is None:
        raise exception.PurchaseNotFoundBySubscription(
            subscription_id=subscription_id)
    return purchase
def get_host_capability_by_id(context, host_id):
    """Returns a dict describing specific host_id.

    :raises: exception.NotFound when no capability row matches host_id.
    """
    session = get_session()
    result = model_query(context, HostCapability,
                         session=session).filter_by(id=host_id).first()
    if not result:
        # Bug fix: the message previously interpolated the builtin
        # ``id`` (a function repr) instead of the looked-up host_id.
        raise exception.NotFound(
            "No host capability found by id %s" % host_id)
    return result
def get_instance_type(type_id):
    """Look up an InstanceTypes row by id, memoized in a module cache."""
    global inst_types
    context = RequestContext('1', '1', is_admin=True)
    # Populate the cache on a miss, then always serve from it.
    if type_id not in inst_types:
        inst_types[type_id] = sqlapi.model_query(
            context, novamodels.InstanceTypes).filter_by(
            id=type_id).first()
    return inst_types[type_id]
def _chown_actions_db(ctx, context, instance_uuid):
    """Reassign every InstanceAction of an instance to the target owner.

    Sets project_id/user_id from ``context.target_*`` on each action row
    and re-adds it to the session. Returns the ids of the rows touched.
    """
    actions = nova_db.model_query(
        ctx, nova_db_models.InstanceAction).filter_by(
        instance_uuid=instance_uuid).all()
    changed_ids = []
    for act in actions:
        changed_ids.append(act.id)
        act.project_id = context.target_project_id
        act.user_id = context.target_user_id
        ctx.session.add(act)
    return changed_ids
def update_pending_orders(self, context):
    """Promote PENDING workload orders to OPEN when quota headroom allows."""
    # Pending orders for this project, highest-priority workload first.
    orders = model_query(context, WorkloadOrder).\
        filter_by(status="PENDING").\
        join((Workload, Workload.id == WorkloadOrder.workload_id)).\
        filter_by(project_id = context.project_id).\
        order_by(asc(Workload.priority))
    orderlist = []
    for order in orders:
        orderlist.append(order['id'])
    # We seem to have to do this two-step because of SQLAlchemy
    # Session issues
    for order_id in orderlist:
        # Re-fetch each order individually so it is bound to a live session.
        order = model_query(context, WorkloadOrder).\
            filter_by(id=order_id).first()
        quotas = QUOTAS.get_project_quotas(context, context.project_id)
        ram_clear = instances_clear = False
        for entry in quotas:
            if entry == "ram":
                # Would the order's memory (default 1024 MB per instance)
                # still fit under the RAM quota?
                ram_clear = (
                    quotas[entry]['reserved'] +
                    quotas[entry]['in_use'] +
                    ((order["memory_mb"] or 1024) *
                     order['instances'])) <= quotas[entry]['limit']
            if entry == "instances":
                # Would the order's instance count (default 1) fit?
                instances_clear = (
                    quotas[entry]['reserved'] +
                    quotas[entry]['in_use'] +
                    (order["instances"] or 1)) <= quotas[entry]['limit']
        if instances_clear and ram_clear:
            LOG.debug("Updating order status to open %s",
                      (str(order['id'])))
            order.status = "OPEN"
            order.save()
def _get_ncpu_emc_target_info_list(self):
    """Collect EMC iSCSI target info for volumes attached on this host.

    :returns: list of ``{'target_iqn': ..., 'target_lun': ...}`` dicts
        for every EMC ('com.emc') target referenced by a block device
        mapping of an instance on this compute host.
    """
    target_info_list = []
    # Find the targets used by VM on the compute node
    bdms = db_api.model_query(context.get_admin_context(),
                              models.BlockDeviceMapping,
                              session=db_api.get_session())
    bdms = bdms.filter(models.BlockDeviceMapping.connection_info != None)
    # str.strip() instead of string.strip(): the string-module function
    # was removed in Python 3.
    bdms = bdms.join(models.BlockDeviceMapping.instance).filter_by(
        host=self.host_name.strip())
    for bdm in bdms:
        conn_info = json.loads(bdm.connection_info)
        # Guard: the column can hold the JSON literal 'null'.
        if conn_info is not None and 'data' in conn_info:
            if 'target_iqns' in conn_info['data']:
                target_iqns = conn_info['data']['target_iqns']
                # Compatible check for VNX icehouse driver
                if 'target_luns' in conn_info['data']:
                    target_luns = conn_info['data']['target_luns']
                else:
                    target_luns = ([conn_info['data']['target_lun']] *
                                   len(target_iqns))
            elif 'target_iqn' in conn_info['data']:
                target_iqns = [conn_info['data']['target_iqn']]
                target_luns = [conn_info['data']['target_lun']]
            else:
                target_iqns = []
                target_luns = []
            for target_iqn, target_lun in zip(target_iqns, target_luns):
                if 'com.emc' in target_iqn:
                    target_info = {
                        'target_iqn': target_iqn,
                        'target_lun': target_lun,
                    }
                    target_info_list.append(target_info)
    return target_info_list
def item_type_get_all(context, filters=None):
    """List item types, optionally filtered by column equality."""
    query = model_query(context, models.ItemType)
    return query.filter_by(**(filters or {})).all()
def payment_type_get_all(context, filters=None):
    """List payment types, optionally filtered by column equality."""
    query = model_query(context, models.PaymentType)
    return query.filter_by(**(filters or {})).all()
def subscription_get_all_by_resource_uuid(context, resource_uuid):
    """List all subscriptions attached to the given resource uuid."""
    query = model_query(context, models.Subscription)
    return query.filter_by(resource_uuid=resource_uuid).all()
def subscription_get_all_by_project(context, project_id):
    """List a project's subscriptions, including soft-deleted rows."""
    query = model_query(context, models.Subscription, read_deleted='yes')
    return query.filter_by(project_id=project_id).all()
def region_get_all(context, filters=None):
    """List regions, optionally filtered by column equality."""
    query = model_query(context, models.Region)
    return query.filter_by(**(filters or {})).all()
def workloads_get_all(self, context):
    """Return a query over the caller's project's workloads."""
    query = model_query(context, Workload)
    return query.filter_by(project_id=context.project_id)
def host_capability_get_all(context, filters=None):
    """Return a query over host capabilities, optionally filtered.

    :param filters: optional dict of column equality filters.
    """
    filters = filters or {}
    session = get_session()
    # Bug fix: the filters parameter was previously accepted but
    # silently ignored, unlike the sibling *_get_all helpers. An empty
    # filter dict leaves the query unchanged, so existing callers that
    # pass no filters see identical behavior.
    return model_query(context, HostCapability,
                       session=session).filter_by(**filters)
def myproject_host_get_all(context, session=None, check_update=True):
    """List MyProjectHost rows, excluding down hosts unless asked not to."""
    query = model_query(context, models.MyProjectHost, session=session)
    return (_filter_down_hosts(query) if check_update else query).all()
def myproject_host_get(context, host_name, session=None, check_update=True):
    """Fetch a MyProjectHost by name, or None.

    When check_update is true, hosts considered 'down' are excluded.
    """
    query = model_query(context, models.MyProjectHost, session=session)
    query = query.filter_by(host_name=host_name)
    if check_update:
        query = _filter_down_hosts(query)
    return query.first()
def subscription_get_all_by_project(context, project_id):
    """List a project's subscriptions, including soft-deleted rows."""
    query = model_query(context, models.Subscription, read_deleted="yes")
    return query.filter_by(project_id=project_id).all()
def subscription_get_all_by_project(context, project_id):
    """List a project's (non-deleted) subscriptions."""
    query = model_query(context, models.Subscription)
    return query.filter_by(project_id=project_id).all()
def get_computes():
    """Return all non-deleted 'compute' services, via an admin context."""
    admin_ctx = RequestContext('1', '1', is_admin=True)
    query = sqlapi.model_query(admin_ctx, novamodels.Service,
                               read_deleted='no')
    return query.filter_by(topic='compute').all()
def _host_capability_get_query(context, session=None, read_deleted=None):
    """Build the base HostCapability query with the given session/read mode."""
    return model_query(context,
                       HostCapability,
                       session=session,
                       read_deleted=read_deleted)
def myproject_host_get_all(context, session=None, check_update=True):
    """List MyProjectHost rows; by default excludes hosts marked down."""
    query = model_query(context, models.MyProjectHost, session=session)
    if check_update:
        query = _filter_down_hosts(query)
    return query.all()
def update(self, req, id, body):
    """
    Update a workload.

    Potential arguments:
    {"workload": {"name": "New Name", "priority": 5}}
    {"order": {"id": 1, "status": "FILLED"}}
    {"order": {"instances": 1, "memory_mb": 4096}}
    """
    context = req.environ['nova.context']
    authorize(context)
    # Only workloads owned by the caller's project may be updated.
    workload = model_query(context, Workload).\
        filter_by(project_id=context.project_id).\
        filter_by(id=int(id)).first()
    orders = []
    if workload:
        if body.get("workload"):
            # Simple attribute updates on the workload itself.
            if body['workload'].get("name"):
                workload.name = body['workload'].get("name")
            if body['workload'].get("priority"):
                workload.priority = body['workload'].get("priority")
            workload.save()
        if body.get("order"):
            for order_req in body.get("order"):
                if order_req.get("id"):
                    # We're updating an existing order.
                    order = model_query(context, WorkloadOrder).\
                        filter_by(workload_id=workload.id).\
                        filter_by(id=order_req['id']).first()
                    # Only OPEN/PENDING orders may be modified.
                    if order.status == "OPEN" or order.status == "PENDING":
                        if order_req.get("instances"):
                            order.instances = order_req.get("instances")
                        if order_req.get("memory_mb"):
                            order.memory_mb = order_req.get("memory_mb")
                        if order_req.get("status") and order_req.get(
                                "status") in ORDER_STATUSES:
                            order.status = order_req.get("status")
                        order.save()
                        orders.append(order)
                elif order_req.get("instances") or order_req.get(
                        "memory_mb"):
                    # We're creating a new order.
                    # At some point we should check if we have
                    # an existing open/pending order and just update that.
                    order_status = "OPEN"
                    # If it's a grow order, check and see if we're at capacity.
                    if (order_req.get("instances") or 1) > 0:
                        # Check to see if we have a pending order.
                        pending_order = model_query(context, WorkloadOrder).\
                            filter_by(workload_id=workload.id).\
                            filter(or_(WorkloadOrder.status=="PENDING",
                                       WorkloadOrder.status=="OPEN")).\
                            filter(WorkloadOrder.instances >= 1).first()
                        if pending_order:
                            # Refuse to stack grow orders.
                            return {
                                "status": "Failure",
                                "message": "Existing pending or open order."
                            }
                        # We're growing, check to see if we can fit under
                        # quota limits.
                        quotas = QUOTAS.get_project_quotas(
                            context, context.project_id)
                        instances = order_req.get("instances") or 1
                        for entry in quotas:
                            if entry == "ram":
                                LOG.debug(
                                    "Quota check %s",
                                    str(quotas[entry]['reserved'] +
                                        quotas[entry]['in_use'] +
                                        (order_req.get("memory_mb") or 1024
                                         )))
                                # Over the RAM quota: queue as PENDING
                                # instead of OPEN.
                                if (quotas[entry]['reserved'] +
                                        quotas[entry]['in_use'] +
                                        ((order_req.get("memory_mb") or 1024)
                                         * instances)) >= \
                                        quotas[entry]['limit']:
                                    order_status = "PENDING"
                            if entry == "instances":
                                # Over the instance quota: same downgrade.
                                if (quotas[entry]['reserved'] +
                                        quotas[entry]['in_use'] +
                                        (order_req.get("instances") or 1)) >= \
                                        quotas[entry]['limit']:
                                    order_status = "PENDING"
                    order = WorkloadOrder()
                    order.workload_id = workload.id
                    # Defaults: 1 instance, 0 MB when unspecified.
                    order.instances = order_req.get("instances") or 1
                    order.memory_mb = order_req.get("memory_mb") or 0
                    order.status = order_status
                    order.save()
                    orders.append(order)
    return {"workload": workload, "order": orders}
def _db_compute_node_get_all_by_uuids(context, compute_uuids):
    """Fetch all ComputeNode rows whose uuid is in compute_uuids."""
    query = sa_api.model_query(context, models.ComputeNode)
    return query.filter(models.ComputeNode.uuid.in_(compute_uuids)).all()
def show(self, req, id):
    """Show a workload's open orders, opportunistically inserting a
    scale-down order when a higher-priority workload is pending.

    Returns {"orders": [...]} for a matching workload, {} otherwise.
    """
    context = req.environ['nova.context']
    authorize(context)
    # Check to see if we have workload orders that should be open
    self.update_pending_orders(context)
    query = model_query(context, Workload).\
        filter_by(project_id=context.project_id).\
        filter_by(id=int(id))
    workload = query.first()
    orders = []
    if workload:
        # Check and see if we're elidgible for scale-down.
        # We aren't if we already have open scale-down
        # orders.
        elidgible = True
        query = model_query(context, WorkloadOrder).\
            filter(or_(
                WorkloadOrder.status == "OPEN",
                WorkloadOrder.status == "WORKING"
            )).filter_by(workload_id=workload.id)
        for order in query:
            # Negative instance counts denote scale-down orders.
            if order['instances'] < 0:
                # Pre-existing scale down order.
                elidgible = False
        if elidgible:
            # First check to see if there are pending orders for
            # higher priority workloads (lower number = higher priority).
            query = model_query(context, WorkloadOrder).\
                filter_by(status="PENDING").\
                join((Workload, Workload.id == WorkloadOrder.workload_id)).\
                filter_by(project_id = context.project_id).\
                filter(Workload.priority < workload.priority)
            for pending in query:
                # We have a higher priority workload in a pending state,
                # insert a scale-down order.
                # This should probably check and ensure an existing
                # scale-down order doesn't exist.
                order = WorkloadOrder()
                order.workload_id = workload.id
                # Mirror the pending order's size, negated.
                order.instances = pending['instances'] * -1
                order.memory_mb = pending['memory_mb']
                order.status = "OPEN"
                order.save()
                # Only handle one at a time.
                break
        # # Check and see if we have pending orders that
        # # can now be opened.
        # query = model_query(context, WorkloadOrder).\
        #     filter_by(workload_id=workload.id).\
        #     filter_by(status="PENDING")
        # for order in query:
        #     quotas = QUOTAS.get_project_quotas(context, context.project_id)
        #     ram_clear = instances_clear = False
        #     for entry in quotas:
        #         if entry == "ram":
        #             ram_clear = (quotas[entry]['reserved']+quotas[entry]['in_use']+(order["memory_mb"] or 1024)) <= quotas[entry]['limit']
        #         if entry == "instances":
        #             instances_clear = (quotas[entry]['reserved']+quotas[entry]['in_use']+(order["instances"] or 1)) <= quotas[entry]['limit']
        #     if ram_clear and instances_clear:
        #         order.status = "OPEN"
        #         order.save()
        # Now list the orders we have open.
        query = model_query(context, WorkloadOrder).\
            filter_by(workload_id=workload.id).\
            filter_by(status="OPEN")
        for order in query:
            orders.append({
                "id": order.id,
                "instances": order.instances,
                "memory_mb": order.memory_mb
            })
    else:
        return {}
    return {"orders": orders}