def volume_backup_list_paged(request, marker=None, paginate=False,
                             sort_dir="desc"):
    """List volume backups, optionally one page at a time.

    :param request: the request object coming from the browser.
    :param marker: id of the last backup on the previous page, or None.
    :param paginate: when True, limit the result to one settings-driven page.
    :param sort_dir: sort direction, 'asc' or 'desc' (default).
    :returns: tuple of (backups, has_more_data, has_prev_data).
    """
    more_pages = False
    prev_pages = False
    c_client = cinderclient(request)
    # Without a working client there is nothing to list.
    if c_client is None:
        return [], more_pages, prev_pages

    if paginate:
        page_size = utils.get_page_size(request)
        # sort_key/sort_dir were deprecated in kilo; a single combined
        # "sort" parameter ('created_at:<dir>') is used instead.
        sort = 'created_at:' + sort_dir
        # Request one extra item so update_pagination can detect more pages.
        raw = c_client.backups.list(limit=page_size + 1, marker=marker,
                                    sort=sort)
        backups = [VolumeBackup(b) for b in raw]
        backups, more_pages, prev_pages = update_pagination(
            backups, page_size, marker, sort_dir)
    else:
        backups = [VolumeBackup(b) for b in c_client.backups.list()]

    return backups, more_pages, prev_pages
def volume_snapshot_list_paged(request, search_opts=None, marker=None,
                               paginate=False, sort_dir="desc"):
    """List volume snapshots, optionally one page at a time.

    :param request: the request object coming from the browser.
    :param search_opts: filter options passed through to the snapshot API.
    :param marker: id of the last snapshot on the previous page, or None.
    :param paginate: when True, limit the result to one settings-driven page.
    :param sort_dir: sort direction, 'asc' or 'desc' (default).
    :returns: tuple of (snapshots, has_more_data, has_prev_data).
    """
    has_more_data = False
    has_prev_data = False
    snapshots = []
    c_client = _cinderclient_with_generic_groups(request)
    if c_client is None:
        # FIX: previously returned has_more_data twice, so callers never
        # received the has_prev_data flag on the early-exit path.
        return snapshots, has_more_data, has_prev_data
    if paginate:
        page_size = utils.get_page_size(request)
        # sort_key and sort_dir deprecated in kilo, use sort
        # if pagination is true, we use a single sort parameter
        # by default, it is "created_at"
        sort = 'created_at:' + sort_dir
        # Ask for one extra item so update_pagination can tell whether
        # another page exists beyond this one.
        for s in c_client.volume_snapshots.list(search_opts=search_opts,
                                                limit=page_size + 1,
                                                marker=marker,
                                                sort=sort):
            snapshots.append(VolumeSnapshot(s))
        snapshots, has_more_data, has_prev_data = update_pagination(
            snapshots, page_size, marker, sort_dir)
    else:
        for s in c_client.volume_snapshots.list(search_opts=search_opts):
            snapshots.append(VolumeSnapshot(s))
    return snapshots, has_more_data, has_prev_data
def volume_backup_list_paged_with_page_menu(request, page_number=1,
                                            sort_dir="desc",
                                            all_tenants=False):
    """List volume backups for offset-based (page-menu) pagination.

    :param request: the request object coming from the browser.
    :param page_number: 1-based page index to fetch.
    :param sort_dir: sort direction, 'asc' or 'desc' (default).
    :param all_tenants: when True, list backups across all projects.
    :returns: tuple of (backups, page_size, total_count, pages_count).
    """
    page_size = utils.get_page_size(request)
    # Microversion 3.45 is required for with_count/offset support.
    c_client = cinderclient(request, '3.45')
    if c_client is None:
        return [], 0, 0, 0

    sort = 'created_at:' + sort_dir
    search_opts = {
        'with_count': True,
        'offset': (page_number - 1) * page_size,
        'all_tenants': all_tenants,
    }
    bkps, count = c_client.backups.list(limit=page_size, sort=sort,
                                        search_opts=search_opts)
    if not bkps:
        return [], page_size, count, 0

    # Some client versions wrap the results in an extra list; unwrap it.
    if isinstance(bkps[0], list):
        bkps = bkps[0]

    pages_count = int(math.ceil(float(count) / float(page_size)))
    backups = [VolumeBackup(b) for b in bkps]
    return backups, page_size, count, pages_count
def flavor_list_paged(request, is_public=True, get_extras=False, marker=None,
                      paginate=False, sort_key="name", sort_dir="desc",
                      reversed_order=False):
    """Get the list of available instance sizes (flavors).

    :param request: the request object coming from the browser.
    :param is_public: whether to list only public flavors.
    :param get_extras: when True, attach extra specs to each flavor.
    :param marker: id of the last flavor on the previous page, or None.
    :param paginate: when True, limit the result to one settings-driven page.
    :param sort_key: field to sort by (default 'name').
    :param sort_dir: sort direction, 'asc' or 'desc' (default).
    :param reversed_order: True when navigating backwards through pages.
    :returns: tuple of (flavors, has_more_data, has_prev_data).
    """
    more_pages = False
    prev_pages = False
    client = _nova.novaclient(request)

    if paginate:
        if reversed_order:
            # Walking backwards: flip the requested sort direction.
            sort_dir = 'desc' if sort_dir == 'asc' else 'asc'
        page_size = utils.get_page_size(request)
        # Request one extra row so update_pagination can detect more pages.
        flavors = client.flavors.list(is_public=is_public,
                                      marker=marker,
                                      limit=page_size + 1,
                                      sort_key=sort_key,
                                      sort_dir=sort_dir)
        flavors, more_pages, prev_pages = update_pagination(
            flavors, page_size, marker, reversed_order)
    else:
        flavors = client.flavors.list(is_public=is_public)

    if get_extras:
        for flavor in flavors:
            flavor.extras = flavor_get_extras(request, flavor.id, True, flavor)

    return (flavors, more_pages, prev_pages)
def get_initial(self):
    """Return the current user-settings values used to pre-fill the form."""
    req = self.request
    return {
        'language': utils.get_language(req),
        'timezone': utils.get_timezone(req),
        'pagesize': settings_utils.get_page_size(req),
        'instance_log_length': settings_utils.get_log_length(req),
    }
def server_list_paged(request,
                      search_opts=None,
                      detailed=True,
                      sort_dir="desc"):
    """List servers with pagination, tolerating a just-deleted marker.

    When the server used as the pagination marker has been deleted (flagged
    via ``request.session['server_deleted']``), Nova would reject the stale
    marker; this function retries with the deleted id and, if that still
    yields nothing, falls back to the head or tail of the listing.

    :param request: the request object coming from the browser.
    :param search_opts: dict of filter options; may contain 'marker',
        'paginate' and 'all_tenants' control keys which are consumed here.
    :param detailed: whether to request detailed server records from Nova.
    :param sort_dir: sort direction, 'asc' or 'desc' (default).
    :returns: tuple of (servers, has_more_data, has_prev_data).
    """
    has_more_data = False
    has_prev_data = False
    nova_client = get_novaclient_with_locked_status(request)
    page_size = utils.get_page_size(request)
    search_opts = {} if search_opts is None else search_opts
    marker = search_opts.get('marker', None)
    # Non-admin listings are scoped to the current project.
    if not search_opts.get('all_tenants', False):
        search_opts['project_id'] = request.user.tenant_id
    if search_opts.pop('paginate', False):
        reversed_order = sort_dir == "asc"
        LOG.debug("Notify received on deleted server: %r",
                  ('server_deleted' in request.session))
        # A view may have flagged that the marker server was just deleted;
        # pop so the flag only influences this one listing.
        deleted = request.session.pop('server_deleted', None)
        view_marker = 'possibly_deleted' if deleted and marker else 'ok'
        # Use the deleted id as marker when present, else the regular one.
        search_opts['marker'] = deleted if deleted else marker
        # One extra row lets update_pagination detect a following page.
        search_opts['limit'] = page_size + 1
        search_opts['sort_dir'] = sort_dir
        servers = [
            Server(s, request)
            for s in nova_client.servers.list(detailed, search_opts)
        ]
        if view_marker == 'possibly_deleted':
            # Empty result: the deleted marker fell off the head; retry
            # from the very beginning in descending order.
            if not servers:
                view_marker = 'head_deleted'
                search_opts['sort_dir'] = 'desc'
                reversed_order = False
                servers = [
                    Server(s, request)
                    for s in nova_client.servers.list(detailed, search_opts)
                ]
            # Still empty: assume it fell off the tail; retry ascending.
            if not servers:
                view_marker = 'tail_deleted'
                search_opts['sort_dir'] = 'asc'
                reversed_order = True
                servers = [
                    Server(s, request)
                    for s in nova_client.servers.list(detailed, search_opts)
                ]
        (servers, has_more_data,
         has_prev_data) = update_pagination(servers, page_size, marker,
                                            reversed_order)
        # After falling back to head/tail there is nothing before/after.
        has_prev_data = (False
                         if view_marker == 'head_deleted' else has_prev_data)
        has_more_data = (False
                         if view_marker == 'tail_deleted' else has_more_data)
    else:
        servers = [
            Server(s, request)
            for s in nova_client.servers.list(detailed, search_opts)
        ]
    return (servers, has_more_data, has_prev_data)
def volume_list_paged(request, search_opts=None, marker=None, paginate=False,
                      sort_dir="desc"):
    """List volumes with pagination.

    To see all volumes in the cloud as an admin you can pass in a special
    search option: {'all_tenants': 1}

    :returns: tuple of (volumes, has_more_data, has_prev_data).
    """
    more_pages = False
    prev_pages = False

    # Filtering by group_id requires a microversion-enabled client.
    c_client = _cinderclient_with_generic_groups(request)
    if c_client is None:
        return [], more_pages, prev_pages

    # Map volume_id -> transfer so each volume can be annotated below.
    transfers = {t.volume_id: t
                 for t in transfer_list(request, search_opts=search_opts)}

    def _wrap(raw_volumes):
        # Attach any pending transfer to the raw record, then wrap it.
        wrapped = []
        for v in raw_volumes:
            v.transfer = transfers.get(v.id)
            wrapped.append(Volume(v))
        return wrapped

    if paginate:
        page_size = utils.get_page_size(request)
        # sort_key/sort_dir were deprecated in kilo; a single combined
        # "sort" parameter ('created_at:<dir>') is used instead.
        volumes = _wrap(c_client.volumes.list(search_opts=search_opts,
                                              limit=page_size + 1,
                                              marker=marker,
                                              sort='created_at:' + sort_dir))
        volumes, more_pages, prev_pages = update_pagination(
            volumes, page_size, marker, sort_dir)
    else:
        volumes = _wrap(c_client.volumes.list(search_opts=search_opts))

    return volumes, more_pages, prev_pages
def metadefs_namespace_list(request,
                            filters=None,
                            sort_dir='asc',
                            sort_key='namespace',
                            marker=None,
                            paginate=False):
    """Retrieve a listing of Namespaces

    :param paginate: If true will perform pagination based on settings.
    :param marker: Specifies the namespace of the last-seen namespace.
             The typical pattern of limit and marker is to make an
             initial limited request and then to use the last
             namespace from the response as the marker parameter
             in a subsequent limited request. With paginate, limit
             is automatically set.
    :param sort_dir: The sort direction ('asc' or 'desc').
    :param sort_key: The field to sort on (for example, 'created_at').
             Default is namespace. The way base namespaces are loaded into
             glance typically at first deployment is done in a single
             transaction giving them a potentially unpredictable sort result
             when using create_at.
    :param filters: specifies addition fields to filter on such as
             resource_types.
    :returns: A tuple of three values: 1) Current page results 2) A boolean
             of whether or not there are previous page(s). 3) A boolean of
             whether or not there are more page(s).
    """
    # Listing namespaces requires the v2 API. If not supported we return an
    # empty array so callers don't need to worry about version checking.
    if filters is None:
        filters = {}
    limit = settings.API_RESULT_LIMIT
    page_size = utils.get_page_size(request)

    if paginate:
        # One extra item lets us detect whether another page follows.
        request_size = page_size + 1
    else:
        request_size = limit

    kwargs = {'filters': filters}
    if marker:
        kwargs['marker'] = marker
    kwargs['sort_dir'] = sort_dir
    kwargs['sort_key'] = sort_key

    namespaces_iter = glanceclient(request).metadefs_namespace.list(
        page_size=request_size, limit=limit, **kwargs)

    # Filter the namespaces based on the provided properties_target since this
    # is not supported by the metadata namespaces API.
    resource_types = filters.get('resource_types')
    properties_target = filters.get('properties_target')
    if resource_types and properties_target:
        namespaces_iter = filter_properties_target(namespaces_iter,
                                                   resource_types,
                                                   properties_target)

    has_prev_data = False
    has_more_data = False
    if paginate:
        namespaces = list(itertools.islice(namespaces_iter, request_size))
        # first and middle page condition
        if len(namespaces) > page_size:
            # Drop the sentinel extra item requested above.
            namespaces.pop(-1)
            has_more_data = True
            # middle page condition
            if marker is not None:
                has_prev_data = True
        # first page condition when reached via prev back
        elif sort_dir == 'desc' and marker is not None:
            has_more_data = True
        # last page condition
        elif marker is not None:
            has_prev_data = True
    else:
        namespaces = list(namespaces_iter)
    namespaces = [Namespace(namespace) for namespace in namespaces]
    return namespaces, has_more_data, has_prev_data
def image_list_detailed(request, marker=None, sort_dir='desc',
                        sort_key='created_at', filters=None, paginate=False,
                        reversed_order=False, **kwargs):
    """Thin layer above glanceclient, for handling pagination issues.

    It provides iterating both forward and backward on top of ascetic
    OpenStack pagination API - which natively supports only iterating
    forward through the entries. Thus in order to retrieve list of objects
    at previous page, a request with the reverse entries order had to be
    made to Glance, using the first object id on current page as the marker
    - restoring the original items ordering before sending them back to the
    UI.

    :param request: The request object coming from browser to be passed
        further into Glance service.
    :param marker: The id of an object which defines a starting point of
        a query sent to Glance service.
    :param sort_dir: The direction by which the resulting image list
        throughout all pages (if pagination is enabled) will be sorted.
        Could be either 'asc' (ascending) or 'desc' (descending), defaults
        to 'desc'.
    :param sort_key: The name of key by which the resulting image list
        throughout all pages (if pagination is enabled) will be sorted.
        Defaults to 'created_at'.
    :param filters: A dictionary of filters passed as is to Glance service.
    :param paginate: Whether the pagination is enabled. If it is, then the
        number of entries on a single page of images table is limited to
        the specific number stored in browser cookies.
    :param reversed_order: Set this flag to True when it's necessary to
        get a reversed list of images from Glance (used for navigating the
        images list back in UI).
    """
    limit = settings.API_RESULT_LIMIT
    page_size = utils.get_page_size(request)

    if paginate:
        # One extra item lets us detect whether another page follows.
        request_size = page_size + 1
    else:
        request_size = limit

    _normalize_list_input(filters, **kwargs)
    kwargs = {'filters': filters or {}}

    if marker:
        kwargs['marker'] = marker
    kwargs['sort_key'] = sort_key

    if not reversed_order:
        kwargs['sort_dir'] = sort_dir
    else:
        # Navigating backwards: ask Glance for the opposite ordering.
        kwargs['sort_dir'] = 'desc' if sort_dir == 'asc' else 'asc'

    images_iter = glanceclient(request).images.list(page_size=request_size,
                                                    limit=limit,
                                                    **kwargs)
    has_prev_data = False
    has_more_data = False
    if paginate:
        images = list(itertools.islice(images_iter, request_size))
        # first and middle page condition
        if len(images) > page_size:
            # Drop the sentinel extra item requested above.
            images.pop(-1)
            has_more_data = True
            # middle page condition
            if marker is not None:
                has_prev_data = True
        # first page condition when reached via prev back
        elif reversed_order and marker is not None:
            has_more_data = True
        # last page condition
        elif marker is not None:
            has_prev_data = True

        # restore the original ordering here
        if reversed_order:
            images = sorted(images, key=lambda image:
                            (getattr(image, sort_key) or '').lower(),
                            reverse=(sort_dir == 'desc'))
    else:
        images = list(images_iter)

    # TODO(jpichon): Do it better
    wrapped_images = []
    for image in images:
        wrapped_images.append(Image(image))

    return wrapped_images, has_more_data, has_prev_data
def server_list_paged(request,
                      search_opts=None,
                      detailed=True,
                      sort_dir="desc"):
    """List servers with pagination, tolerating a just-deleted marker.

    When the server used as the pagination marker has been deleted (flagged
    via ``request.session['server_deleted']``), Nova would reject the stale
    marker; this function retries with the deleted id and, if that still
    yields nothing, falls back to the head or tail of the listing.

    :param request: the request object coming from the browser.
    :param search_opts: dict of filter options; may contain 'marker',
        'paginate' and 'all_tenants' control keys which are consumed here.
    :param detailed: whether to request detailed server records from Nova.
    :param sort_dir: sort direction, 'asc' or 'desc' (default).
    :returns: tuple of (servers, has_more_data, has_prev_data).
    """
    has_more_data = False
    has_prev_data = False
    nova_client = get_novaclient_with_locked_status(request)
    page_size = utils.get_page_size(request)
    search_opts = {} if search_opts is None else search_opts
    marker = search_opts.get('marker', None)
    # Non-admin listings are scoped to the current project.
    if not search_opts.get('all_tenants', False):
        search_opts['project_id'] = request.user.tenant_id
    if search_opts.pop('paginate', False):
        reversed_order = sort_dir == "asc"
        LOG.debug("Notify received on deleted server: %r",
                  ('server_deleted' in request.session))
        # A view may have flagged that the marker server was just deleted;
        # pop so the flag only influences this one listing.
        deleted = request.session.pop('server_deleted', None)
        view_marker = 'possibly_deleted' if deleted and marker else 'ok'
        search_opts['marker'] = deleted if deleted else marker
        # One extra row lets update_pagination detect a following page.
        search_opts['limit'] = page_size + 1
        # NOTE(amotoki): It looks like the 'sort_keys' must be unique to make
        # the pagination in the nova API works as expected. Multiple servers
        # can have a same 'created_at' as its resolution is a second.
        # To ensure the uniqueness we add 'uuid' to the sort keys.
        # 'display_name' is added before 'uuid' to list servers in the
        # alphabetical order.
        sort_keys = ['created_at', 'display_name', 'uuid']

        servers = [Server(s, request)
                   for s in nova_client.servers.list(
                       detailed, search_opts,
                       sort_keys=sort_keys,
                       sort_dirs=[sort_dir] * 3)]
        if view_marker == 'possibly_deleted':
            # Empty result: the deleted marker fell off the head; retry
            # from the very beginning in descending order.
            if not servers:
                view_marker = 'head_deleted'
                reversed_order = False
                servers = [Server(s, request)
                           for s in nova_client.servers.list(
                               detailed,
                               search_opts,
                               sort_keys=sort_keys,
                               sort_dirs=['desc'] * 3)]
            # Still empty: assume it fell off the tail; retry ascending.
            if not servers:
                view_marker = 'tail_deleted'
                reversed_order = True
                servers = [Server(s, request)
                           for s in nova_client.servers.list(
                               detailed,
                               search_opts,
                               sort_keys=sort_keys,
                               sort_dirs=['asc'] * 3)]

        (servers, has_more_data, has_prev_data) = update_pagination(
            servers, page_size, marker, reversed_order)
        # After falling back to head/tail there is nothing before/after.
        has_prev_data = (False
                         if view_marker == 'head_deleted' else has_prev_data)
        has_more_data = (False
                         if view_marker == 'tail_deleted' else has_more_data)
    else:
        servers = [Server(s, request)
                   for s in nova_client.servers.list(detailed, search_opts)]
    return (servers, has_more_data, has_prev_data)