def flavor_get_all(context, inactive=False, filters=None, sort_key='flavorid',
                   sort_dir='asc', limit=None, marker=None):
    """Return every flavor as a dict (including extra_specs).

    :param inactive: when True, query with read_deleted="yes" so
        soft-deleted flavors are included.
    :param filters: optional dict; recognised keys are 'min_memory_mb',
        'min_root_gb', 'disabled' and 'is_public'.
    :param sort_key, sort_dir, limit, marker: accepted for signature
        compatibility. Pagination itself is bypassed (see note at the end);
        only the marker-existence check is still performed.
    :raises Exception: when *marker* is given but no flavor with that
        flavorid exists.
    """
    if not filters:
        filters = {}

    # FIXME(sirp): now that we have the `disabled` field for flavors, we
    # should probably remove the use of `deleted` to mark inactive. `deleted`
    # should mean truly deleted, e.g. we can safely purge the record out of
    # the database.
    read_deleted = "yes" if inactive else "no"

    flavor_query = _flavor_get_query(context, read_deleted=read_deleted)

    if 'min_memory_mb' in filters:
        flavor_query = flavor_query.filter(
            models.InstanceTypes.memory_mb >= filters['min_memory_mb'])
    if 'min_root_gb' in filters:
        flavor_query = flavor_query.filter(
            models.InstanceTypes.root_gb >= filters['min_root_gb'])
    if 'disabled' in filters:
        flavor_query = flavor_query.filter(
            models.InstanceTypes.disabled == filters['disabled'])

    if 'is_public' in filters and filters['is_public'] is not None:
        # NOTE(review): the per-project visibility clause
        # (InstanceTypes.projects.any(...)) is disabled here, so only the
        # is_public column itself is matched and the list always holds a
        # single predicate.
        public_predicates = [
            models.InstanceTypes.is_public == filters['is_public']]
        if len(public_predicates) > 1:
            flavor_query = flavor_query.filter(or_(*public_predicates))
        else:
            flavor_query = flavor_query.filter(public_predicates[0])

    marker_record = None
    if marker is not None:
        # Verify the marker flavor exists even though pagination below is
        # disabled.
        marker_record = _flavor_get_query(
            context, read_deleted=read_deleted).filter_by(
            flavorid=marker).first()
        if not marker_record:
            raise Exception("MarkerNotFound(%s)" % (marker))

    # NOTE(review): paginate_query is disabled and the filtered query built
    # above is discarded in favour of a fresh, unfiltered RomeQuery —
    # presumably a Rome-backend workaround. Confirm the min_*/disabled/
    # is_public filters are meant to be inert here.
    flavor_query = RomeQuery(models.InstanceTypes)
    return [_dict_with_extra_specs(row) for row in flavor_query.all()]
def model_query(context, *args, **kwargs):
    """Build a Rome query for the given model arguments.

    Thin shim kept signature-compatible with the sqlalchemy-backed
    ``model_query`` helper: *context* is accepted but not consulted; all
    remaining positional and keyword arguments are handed straight to
    :class:`RomeQuery`.
    """
    return RomeQuery(*args, **kwargs)
def compute_node_get_all(context, no_date_fields):
    """Return all ComputeNode records via a plain Rome query.

    :param context: request context (accepted for API compatibility; not
        consulted by the Rome backend here).
    :param no_date_fields: accepted for API compatibility. The original
        sqlalchemy implementation used it to drop created_at/updated_at/
        deleted_at/deleted columns; that pruning is not applied here, so
        the parameter is currently ignored.
    :returns: list of ComputeNode model objects. Unlike the original
        sqlalchemy implementation, no manual join with the Service table is
        performed, so the results carry no pre-joined 'service' dict.
    """
    # NOTE(msdubov): the original implementation used lower-level 'select'
    # queries and joined ComputeNode with Service manually for a ~3x
    # speed-up. That path — together with the novabase_to_dict
    # Encoder/Decoder post-processing that re-attached 'service' — was dead
    # code here (never executed) and has been removed; this Rome query is
    # the only live path.
    return RomeQuery(models.ComputeNode).all()