def test_session_gets_set(self):
    """A session page size that is not int-able is replaced by the default."""
    url = '/project/instances/'
    req = self.factory.get(url)
    req.session['horizon_pagesize'] = 'not int-able'
    fallback = 30
    functions.get_page_size(req, fallback)
    # get_page_size() must have overwritten the bad session value.
    self.assertEqual(req.session['horizon_pagesize'], fallback)
def test_session_gets_set(self):
    # A session value that cannot be coerced to int must be overwritten
    # with the supplied default by get_page_size().
    requested_url = "/project/instances/"
    request = self.factory.get(requested_url)
    request.session["horizon_pagesize"] = "not int-able"
    default = 30
    functions.get_page_size(request, default)
    self.assertEqual(request.session["horizon_pagesize"], default)
def backups_list(request, offset=0, time_after=None, time_before=None,
                 text_match=None):
    """List backups with optional time/text filters.

    Returns (backups, has_more): one page of Backup wrappers plus a flag
    telling whether a further page exists.
    """
    page_size = utils.get_page_size(request)
    search = {}
    if time_after:
        search['time_after'] = time_after
    if time_before:
        search['time_before'] = time_before
    if text_match:
        search['match'] = [{"_all": text_match}]
    # Ask for one extra item so we can detect a following page.
    raw = _freezerclient(request).backups.list(limit=page_size + 1,
                                               offset=offset,
                                               search=search)
    has_more = len(raw) > page_size
    if has_more:
        raw.pop()
    # Wrap data in objects for easier handling.
    return [Backup(item) for item in raw], has_more
def stacks_list(request, marker=None, sort_dir='desc', sort_key='created_at',
                paginate=False):
    """List heat stacks.

    Returns (stacks, has_more_data, has_prev_data); the flags are only
    meaningful when paginate is True.
    """
    limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)
    # Fetch one extra row when paginating so a next page can be detected.
    request_size = page_size + 1 if paginate else limit
    kwargs = {'sort_dir': sort_dir, 'sort_key': sort_key}
    if marker:
        kwargs['marker'] = marker
    stacks = list(heatclient(request).stacks.list(limit=request_size,
                                                  **kwargs))
    has_prev_data = has_more_data = False
    if paginate:
        if len(stacks) > page_size:
            stacks.pop()
            has_more_data = True
            # A marker means we navigated here from a previous page.
            if marker is not None:
                has_prev_data = True
        elif sort_dir == 'asc' and marker is not None:
            has_more_data = True
        elif marker is not None:
            has_prev_data = True
    return (stacks, has_more_data, has_prev_data)
def get_data(self):
    """Fetch one page of alarm history for the object in the URL kwargs.

    Reads page_offset/ts_mode/ts_offset from the query string, pages the
    results with Django's Paginator, and returns the transformed history
    entries, or [] when transformation fails.
    """
    page_offset = self.request.GET.get('page_offset')
    ts_mode = self.request.GET.get('ts_mode')
    ts_offset = self.request.GET.get('ts_offset')
    contacts = []
    object_id = self.kwargs['id']
    name = self.kwargs['name']
    if not ts_mode:
        ts_mode = alarm_history_default_ts_format
    if not page_offset:
        page_offset = 0
    limit = utils.get_page_size(self.request)
    try:
        results = api.monitor.alarm_history(self.request, object_id,
                                            page_offset, limit)
        paginator = Paginator(results, limit)
        contacts = paginator.page(1)
    except EmptyPage:
        # EmptyPage can only come from paginator.page(), so paginator
        # is guaranteed to be bound here.
        contacts = paginator.page(paginator.num_pages)
    except Exception:
        messages.error(
            self.request,
            _("Could not retrieve alarm history for %s") % object_id)
    try:
        return transform_alarm_history(contacts, name, ts_mode, ts_offset)
    except ValueError as err:
        # BUG FIX: BaseException.message was removed in Python 3, so
        # err.message raised AttributeError; log the exception itself
        # using logging's lazy %-args instead of eager string formatting.
        LOG.warning('Failed to transform alarm history due to %s', err)
        messages.warning(self.request, _('Failed to present alarm '
                                         'history'))
        return []
def get_initial(self):
    """Seed the settings form with the user's current display preferences."""
    request = self.request
    initial = {}
    initial['language'] = utils.get_language(request)
    initial['timezone'] = utils.get_timezone(request)
    initial['pagesize'] = utils.get_page_size(request)
    initial['instance_log_length'] = utils.get_log_length(request)
    return initial
def trigger_list_paged(request, detailed=False, search_opts=None, marker=None,
                       limit=None, sort_key=None, sort_dir=None, sort=None,
                       paginate=False, reversed_order=False):
    """List karbor triggers, optionally paginated.

    Returns (triggers, has_more_data, has_prev_data); the flags stay False
    when paginate is False.
    """
    has_more_data = has_prev_data = False
    triggers_api = karborclient(request).triggers
    if not paginate:
        triggers = triggers_api.list(detailed=detailed,
                                     search_opts=search_opts,
                                     marker=marker,
                                     limit=limit,
                                     sort_key=sort_key,
                                     sort_dir=sort_dir,
                                     sort=sort)
        return (triggers, has_more_data, has_prev_data)
    if reversed_order:
        # Flip the sort direction when walking backwards through pages.
        sort_dir = 'desc' if sort_dir == 'asc' else 'asc'
    page_size = utils.get_page_size(request)
    triggers = triggers_api.list(detailed=detailed,
                                 search_opts=search_opts,
                                 marker=marker,
                                 limit=page_size + 1,
                                 sort_key=sort_key,
                                 sort_dir=sort_dir,
                                 sort=sort)
    triggers, has_more_data, has_prev_data = update_pagination(
        triggers, page_size, marker, sort_dir, sort_key, reversed_order)
    return (triggers, has_more_data, has_prev_data)
def checkpoint_list_paged(request, provider_id=None, search_opts=None,
                          marker=None, limit=None, sort_key=None,
                          sort_dir=None, sort=None, paginate=False,
                          reversed_order=False):
    """List karbor checkpoints for a provider, optionally paginated.

    Returns (checkpoints, has_more_data, has_prev_data).
    """
    has_more_data = has_prev_data = False
    checkpoints_api = karborclient(request).checkpoints
    if paginate:
        if reversed_order:
            # Flip the sort direction when walking backwards through pages.
            sort_dir = 'desc' if sort_dir == 'asc' else 'asc'
        page_size = utils.get_page_size(request)
        checkpoints = checkpoints_api.list(
            provider_id=provider_id, search_opts=search_opts, marker=marker,
            limit=page_size + 1, sort_key=sort_key, sort_dir=sort_dir,
            sort=sort)
        checkpoints, has_more_data, has_prev_data = get_pagination_info(
            checkpoints, page_size, marker, reversed_order)
    else:
        checkpoints = checkpoints_api.list(
            provider_id=provider_id, search_opts=search_opts, marker=marker,
            limit=limit, sort_key=sort_key, sort_dir=sort_dir, sort=sort)
    return (checkpoints, has_more_data, has_prev_data)
def get_data(self):
    """Return one page of package definitions, annotated with tenant names.

    Pages forward or backward depending on sort_dir; sets self._more and
    self._prev for the table's pagination controls.
    """
    sort_dir = self.request.GET.get('sort_dir', 'asc')
    opts = {
        'include_disabled': True,
        'sort_dir': sort_dir,
    }
    marker = self.request.GET.get(
        tables.PackageDefinitionsTable._meta.pagination_param, None)
    opts = self.get_filters(opts)
    packages = []
    page_size = utils.get_page_size(self.request)
    with api.handled_exceptions(self.request):
        packages, extra = pkg_api.package_list(
            self.request, marker=marker, filters=opts, paginate=True,
            page_size=page_size)
        if sort_dir == 'asc':
            self._more = extra
        else:
            # 'desc' means we paged backwards: restore display order and
            # treat the overflow flag as "has previous page".
            packages = list(reversed(packages))
            self._prev = extra
        if packages:
            # Probe in the opposite direction (page_size=0) to learn
            # whether a page exists on the other side of this one.
            if sort_dir == 'asc':
                backward_marker = packages[0].id
                opts['sort_dir'] = 'desc'
            else:
                backward_marker = packages[-1].id
                opts['sort_dir'] = 'asc'
            __, extra = pkg_api.package_list(
                self.request, filters=opts, paginate=True,
                marker=backward_marker, page_size=0)
            if sort_dir == 'asc':
                self._prev = extra
            else:
                self._more = extra
    # Add information about project tenant for admin user
    if self.request.user.is_superuser:
        tenants = []
        try:
            tenants, _more = keystone.tenant_list(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to retrieve project list."))
        tenent_name_by_id = {tenant.id: tenant.name for tenant in tenants}
        for i, p in enumerate(packages):
            packages[i].tenant_name = tenent_name_by_id.get(p.owner_id)
    else:
        # Non-admins can only resolve their own tenant's name.
        current_tenant = self.request.session['token'].tenant
        for i, package in enumerate(packages):
            if package.owner_id == current_tenant['id']:
                packages[i].tenant_name = current_tenant['name']
            else:
                packages[i].tenant_name = _('UNKNOWN')
    return packages
def get_data(self):
    """Fetch RBD pool status (paginated) and shape it for the table.

    Returns a list of dicts; size is converted to MB (input presumably
    bytes — TODO confirm against vsmapi) and updated_at is rendered as a
    human-friendly delta.
    """
    default_limit = utils.get_page_size(self.request)
    default_sort_dir = "asc"
    marker = self.request.GET.get('marker', "")
    _rbd_status = []
    try:
        _rbd_status = vsmapi.rbd_pool_status(self.request, paginate_opts={
            "limit": default_limit,
            "sort_dir": default_sort_dir,
            "marker": marker,
        })
        if _rbd_status:
            # Lazy %-args: the repr is only built when DEBUG logging is on.
            logging.debug("resp body in view: %s", _rbd_status)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt; narrow to Exception.
        exceptions.handle(self.request,
                          _('Unable to retrieve sever list. '))
    rbd_status = []
    for _rbd in _rbd_status:
        rbd_status.append({
            "id": _rbd.id,
            "pool": _rbd.pool,
            "image_name": _rbd.image_name,
            "size": _rbd.size / (1024 * 1024),
            "objects": _rbd.objects,
            "order": _rbd.order,
            "format": _rbd.format,
            "updated_at": get_time_delta(_rbd.updated_at),
        })
    return rbd_status
def server_list(request, search_opts=None, detailed=True):
    """List nova servers for the current project (or all tenants).

    Returns (servers, has_more_data).
    """
    nova_client = get_novaclient_with_locked_status(request)
    paginate = False
    if search_opts is None:
        search_opts = {}
    limit = search_opts.get('limit', None)
    # BUG FIX: a preceding `page_size = utils.get_page_size(request)` was
    # dead code — it was unconditionally overwritten here — so it has been
    # removed.
    page_size = base.get_request_page_size(request, limit)
    if 'paginate' in search_opts:
        paginate = search_opts.pop('paginate')
    if paginate:
        # Request one extra row so a following page can be detected.
        search_opts['limit'] = page_size + 1
    all_tenants = search_opts.get('all_tenants', False)
    if all_tenants:
        search_opts['all_tenants'] = True
    else:
        search_opts['project_id'] = request.user.tenant_id
    servers = [Server(s, request)
               for s in nova_client.servers.list(detailed, search_opts)]
    has_more_data = False
    if paginate and len(servers) > page_size:
        servers.pop(-1)
        has_more_data = True
    elif paginate and len(servers) == getattr(settings,
                                              'API_RESULT_LIMIT', 1000):
        has_more_data = True
    return (servers, has_more_data)
def stacks_list(request, marker=None, sort_dir="desc", sort_key="created_at",
                paginate=False):
    """List heat stacks; returns (stacks, has_more_data, has_prev_data)."""
    limit = getattr(settings, "API_RESULT_LIMIT", 1000)
    page_size = utils.get_page_size(request)
    if paginate:
        # Fetch one extra row so a following page can be detected.
        request_size = page_size + 1
    else:
        request_size = limit
    kwargs = {"sort_dir": sort_dir, "sort_key": sort_key}
    if marker:
        kwargs["marker"] = marker
    stacks_iter = heatclient(request).stacks.list(limit=request_size,
                                                  **kwargs)
    has_prev_data = False
    has_more_data = False
    stacks = list(stacks_iter)
    if paginate:
        if len(stacks) > page_size:
            stacks.pop()
            has_more_data = True
            # A marker means we navigated here from a previous page.
            if marker is not None:
                has_prev_data = True
        elif sort_dir == "asc" and marker is not None:
            has_more_data = True
        elif marker is not None:
            has_prev_data = True
    return (stacks, has_more_data, has_prev_data)
def plan_list(request, search_opts=None):
    """List conveyor plans.

    When 'paginate' is set in search_opts, returns
    (plans, has_more, has_prev) via update_pagination; otherwise
    (plans, None, None). Note: pops pagination keys out of the caller's
    search_opts dict.
    """
    opts = search_opts or {}
    paginate = opts.pop('paginate', False)
    marker = opts.pop('marker', None)
    sort_dir = opts.pop('sort_dir', 'desc')
    client = api.conveyorclient(request)
    if not paginate:
        plans = [models.Plan(p) for p in client.plans.list(opts)]
        return plans, None, None
    page_size = utils.get_page_size(request)
    raw = client.plans.list(opts, marker=marker, limit=page_size + 1,
                            sort_key='created_at', sort_dir=sort_dir)
    plans = [models.Plan(p) for p in raw]
    return update_pagination(plans, page_size, marker, sort_dir)
def server_list(request, search_opts=None, all_tenants=False):
    """List nova servers through the generic resource_list helper.

    Returns (list of os_api.nova.Server wrappers, has_more_data).
    """
    page_size = utils.get_page_size(request)
    paginate = False
    if search_opts is None:
        search_opts = {}
    elif 'paginate' in search_opts:
        paginate = search_opts.pop('paginate')
    if paginate:
        # One extra row lets us detect whether a following page exists.
        search_opts['limit'] = page_size + 1
    if all_tenants:
        search_opts['all_tenants'] = True
    else:
        search_opts['project_id'] = request.user.tenant_id
    servers = list(resource_list(request, consts.NOVA_SERVER, search_opts))
    has_more_data = False
    if paginate:
        if len(servers) > page_size:
            servers.pop(-1)
            has_more_data = True
        elif len(servers) == getattr(settings, 'API_RESULT_LIMIT', 1000):
            has_more_data = True
    return ([os_api.nova.Server(i, request) for i in servers], has_more_data)
def volume_list_paged(request, search_opts=None, marker=None, paginate=False,
                      sort_dir="desc"):
    """To see all volumes in the cloud as an admin you can pass in a special
    search option: {'all_tenants': 1}

    Returns (volumes, has_more_data, has_prev_data); each Volume carries
    its matching transfer (or None) on .transfer.
    """
    has_more_data = False
    has_prev_data = False
    volumes = []
    c_client = cinderclient(request)
    # No client (e.g. cinder unavailable) -> empty result, no pages.
    if c_client is None:
        return volumes, has_more_data, has_prev_data
    # build a dictionary of volume_id -> transfer
    transfers = {t.volume_id: t
                 for t in transfer_list(request, search_opts=search_opts)}
    if VERSIONS.active > 1 and paginate:
        page_size = utils.get_page_size(request)
        # sort_key and sort_dir deprecated in kilo, use sort
        # if pagination is true, we use a single sort parameter
        # by default, it is "created_at"
        sort = "created_at:" + sort_dir
        # Fetch one extra row so update_pagination can detect a next page.
        for v in c_client.volumes.list(search_opts=search_opts,
                                       limit=page_size + 1,
                                       marker=marker,
                                       sort=sort):
            v.transfer = transfers.get(v.id)
            volumes.append(Volume(v))
        volumes, has_more_data, has_prev_data = update_pagination(
            volumes, page_size, marker, sort_dir)
    else:
        for v in c_client.volumes.list(search_opts=search_opts):
            v.transfer = transfers.get(v.id)
            volumes.append(Volume(v))
    return volumes, has_more_data, has_prev_data
def image_list_detailed(request, marker=None, filters=None, paginate=False):
    """List glance images; returns (images, has_more_data)."""
    limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)
    # When paginating, pull one extra image to detect a following page.
    request_size = page_size + 1 if paginate else limit
    kwargs = {'filters': filters or {}}
    if marker:
        kwargs['marker'] = marker
    images_iter = glanceclient(request).images.list(page_size=request_size,
                                                    limit=limit,
                                                    **kwargs)
    has_more_data = False
    if not paginate:
        return (list(images_iter), has_more_data)
    images = list(itertools.islice(images_iter, request_size))
    if len(images) > page_size:
        images.pop(-1)
        has_more_data = True
    return (images, has_more_data)
def protectable_list_instances_paged(request, protectable_type,
                                     search_opts=None, marker=None,
                                     limit=None, sort_key=None, sort_dir=None,
                                     sort=None, paginate=False,
                                     reversed_order=False):
    """List protectable instances of a given type, optionally paginated.

    Returns (instances, has_more_data, has_prev_data).
    """
    has_more_data = has_prev_data = False
    protectables = karborclient(request).protectables
    if paginate:
        if reversed_order:
            # Flip the sort direction when walking backwards through pages.
            sort_dir = 'desc' if sort_dir == 'asc' else 'asc'
        page_size = utils.get_page_size(request)
        instances = protectables.list_instances(
            protectable_type, search_opts=search_opts, marker=marker,
            limit=page_size + 1, sort_key=sort_key, sort_dir=sort_dir,
            sort=sort)
        instances, has_more_data, has_prev_data = update_pagination(
            instances, page_size, marker, sort_dir, sort_key, reversed_order)
    else:
        instances = protectables.list_instances(
            protectable_type, search_opts=search_opts, marker=marker,
            limit=limit, sort_key=sort_key, sort_dir=sort_dir, sort=sort)
    return (instances, has_more_data, has_prev_data)
def tenant_list(request, paginate=False, marker=None, domain=None, user=None,
                admin=True, filters=None):
    """List keystone projects; returns (tenants, has_more_data).

    Pagination (limit/marker) only applies on the keystone v2 path; the
    v3 path filters by domain/user instead.
    """
    manager = VERSIONS.get_project_manager(request, admin=admin)
    page_size = utils.get_page_size(request)
    limit = page_size + 1 if paginate else None
    has_more_data = False
    if user == request.user.id:
        # Projects for the current user come straight from the cached token.
        tenants = request.user.authorized_tenants
    elif VERSIONS.active < 3:
        tenants = manager.list(limit, marker)
        if paginate and len(tenants) > page_size:
            tenants.pop(-1)
            has_more_data = True
    else:
        kwargs = {"domain": domain, "user": user}
        if filters is not None:
            kwargs.update(filters)
        tenants = manager.list(**kwargs)
    return (tenants, has_more_data)
def tenant_list(request, paginate=False, marker=None, domain=None, user=None,
                admin=True, filters=None):
    """List keystone projects; returns (tenants, has_more_data).

    On the v3 path the domain argument is replaced by the request's
    effective domain id; limit/marker pagination only applies to v2.
    """
    manager = VERSIONS.get_project_manager(request, admin=admin)
    page_size = utils.get_page_size(request)
    limit = None
    if paginate:
        # One extra row lets us detect whether a following page exists.
        limit = page_size + 1
    has_more_data = False
    # if requesting the projects for the current user,
    # return the list from the cache
    if user == request.user.id:
        tenants = request.user.authorized_tenants
    elif VERSIONS.active < 3:
        tenants = manager.list(limit, marker)
        if paginate and len(tenants) > page_size:
            tenants.pop(-1)
            has_more_data = True
    # V3 API
    else:
        domain_id = get_effective_domain_id(request)
        kwargs = {
            "domain": domain_id,
            "user": user
        }
        if filters is not None:
            kwargs.update(filters)
        tenants = manager.list(**kwargs)
    return tenants, has_more_data
def server_list(request, search_opts=None, all_tenants=False):
    """List nova servers; returns (servers, has_more_data)."""
    page_size = utils.get_page_size(request)
    client = novaclient(request)
    paginate = False
    if search_opts is None:
        search_opts = {}
    elif 'paginate' in search_opts:
        paginate = search_opts.pop('paginate')
    if paginate:
        # One extra row lets us detect whether a following page exists.
        search_opts['limit'] = page_size + 1
    if all_tenants:
        search_opts['all_tenants'] = True
    else:
        search_opts['project_id'] = request.user.tenant_id
    servers = [Server(item, request)
               for item in client.servers.list(True, search_opts)]
    has_more_data = False
    if paginate:
        if len(servers) > page_size:
            servers.pop(-1)
            has_more_data = True
        elif len(servers) == getattr(settings, 'API_RESULT_LIMIT', 1000):
            has_more_data = True
    return (servers, has_more_data)
def test_float_default_value(self):
    """A float default page size is truncated to an int."""
    url = '/project/instances/'
    req = self.factory.get(url)
    req.session['horizon_pagesize'] = 'not int-able'
    # 30.1 should come back as the integer 30.
    self.assertEqual(functions.get_page_size(req, 30.1), 30)
def volume_backup_list_paged(request, marker=None, paginate=False,
                             sort_dir="desc"):
    """List cinder volume backups.

    Returns (backups, has_more_data, has_prev_data); pagination requires
    the v2+ cinder API.
    """
    has_more_data = False
    has_prev_data = False
    backups = []
    c_client = cinderclient(request)
    if c_client is None:
        return backups, has_more_data, has_prev_data
    if VERSIONS.active > 1 and paginate:
        page_size = utils.get_page_size(request)
        # sort_key/sort_dir were deprecated in kilo; v2 takes a single
        # "field:direction" sort parameter, defaulting to created_at.
        sort = 'created_at:' + sort_dir
        raw = c_client.backups.list(limit=page_size + 1, marker=marker,
                                    sort=sort)
        backups = [VolumeBackup(b) for b in raw]
        backups, has_more_data, has_prev_data = update_pagination(
            backups, page_size, marker, sort_dir)
    else:
        backups = [VolumeBackup(b) for b in c_client.backups.list()]
    return backups, has_more_data, has_prev_data
def volume_snapshot_list_paged(request, search_opts=None, marker=None,
                               paginate=False, sort_dir="desc"):
    """List cinder volume snapshots.

    Returns (snapshots, has_more_data, has_prev_data); pagination requires
    the v2+ cinder API.
    """
    has_more_data = False
    has_prev_data = False
    snapshots = []
    c_client = cinderclient(request)
    if c_client is None:
        # BUG FIX: previously returned has_more_data twice instead of
        # (snapshots, has_more_data, has_prev_data). Both flags are False
        # here so behavior is unchanged, but the tuple is now correct.
        return snapshots, has_more_data, has_prev_data
    if VERSIONS.active > 1 and paginate:
        page_size = utils.get_page_size(request)
        # sort_key and sort_dir deprecated in kilo, use sort
        # if pagination is true, we use a single sort parameter
        # by default, it is "created_at"
        sort = 'created_at:' + sort_dir
        for s in c_client.volume_snapshots.list(search_opts=search_opts,
                                                limit=page_size + 1,
                                                marker=marker,
                                                sort=sort):
            snapshots.append(VolumeSnapshot(s))
        snapshots, has_more_data, has_prev_data = update_pagination(
            snapshots, page_size, marker, sort_dir)
    else:
        for s in c_client.volume_snapshots.list(search_opts=search_opts):
            snapshots.append(VolumeSnapshot(s))
    return snapshots, has_more_data, has_prev_data
def get_data(self):
    """Fetch one page of alarm history for the object in the URL kwargs.

    Reads page_offset/ts_mode/ts_offset from the query string, pages the
    results with Django's Paginator, and returns the transformed history
    entries, or [] when transformation fails.
    """
    page_offset = self.request.GET.get('page_offset')
    ts_mode = self.request.GET.get('ts_mode')
    ts_offset = self.request.GET.get('ts_offset')
    contacts = []
    object_id = self.kwargs['id']
    name = self.kwargs['name']
    if not ts_mode:
        ts_mode = alarm_history_default_ts_format
    if not page_offset:
        page_offset = 0
    limit = utils.get_page_size(self.request)
    try:
        results = api.monitor.alarm_history(self.request, object_id,
                                            page_offset, limit)
        paginator = Paginator(results, limit)
        contacts = paginator.page(1)
    except EmptyPage:
        # EmptyPage can only come from paginator.page(), so paginator
        # is guaranteed to be bound here.
        contacts = paginator.page(paginator.num_pages)
    except Exception:
        messages.error(self.request,
                       _("Could not retrieve alarm history for %s")
                       % object_id)
    try:
        return transform_alarm_history(contacts, name, ts_mode, ts_offset)
    except ValueError as err:
        # BUG FIX: BaseException.message was removed in Python 3, so
        # err.message raised AttributeError; log the exception itself
        # using logging's lazy %-args instead of eager string formatting.
        LOG.warning('Failed to transform alarm history due to %s', err)
        messages.warning(self.request, _('Failed to present alarm '
                                         'history'))
        return []
def flavor_list_paged(request, is_public=True, get_extras=False, marker=None,
                      paginate=False, sort_key="name", sort_dir="desc",
                      reversed_order=False):
    """Get the list of available instance sizes (flavors).

    Returns (flavors, has_more_data, has_prev_data); optionally attaches
    extra specs to each flavor.
    """
    has_more_data = has_prev_data = False
    if not paginate:
        flavors = _nova.novaclient(request).flavors.list(is_public=is_public)
    else:
        if reversed_order:
            # Flip the sort direction when walking backwards through pages.
            sort_dir = 'desc' if sort_dir == 'asc' else 'asc'
        page_size = utils.get_page_size(request)
        flavors = _nova.novaclient(request).flavors.list(
            is_public=is_public, marker=marker, limit=page_size + 1,
            sort_key=sort_key, sort_dir=sort_dir)
        flavors, has_more_data, has_prev_data = update_pagination(
            flavors, page_size, marker, reversed_order)
    if get_extras:
        for fl in flavors:
            fl.extras = flavor_get_extras(request, fl.id, True, fl)
    return (flavors, has_more_data, has_prev_data)
def _nodes_info(self):
    """Slice self._nodes into the page around the current marker.

    Returns (page_of_nodes, has_prev, has_more).
    """
    page_size = functions.get_page_size(self.request)
    meta = self.table_classes[0]._meta
    prev_marker = self.request.GET.get(meta.prev_pagination_param, None)
    if prev_marker is not None:
        # Navigating backwards: page ends just before the prev marker.
        sort_dir, marker = 'asc', prev_marker
    else:
        sort_dir = 'desc'
        marker = self.request.GET.get(meta.pagination_param, None)
    nodes = self._nodes
    if not marker:
        start, end = 0, page_size
    else:
        position = [node.uuid for node in nodes].index(marker)
        if sort_dir == 'asc':
            start = max(0, position - page_size)
            end = position
        else:
            start = position + 1
            end = start + page_size
    return nodes[start:end], start != 0, len(nodes) > end
def flavor_list_paged(request, is_public=True, get_extras=False, marker=None,
                      paginate=False, sort_key="name", sort_dir="desc",
                      reversed_order=False):
    """Get the list of available instance sizes (flavors).

    Returns (flavors, has_more_data, has_prev_data); optionally attaches
    extra specs to each flavor.
    """
    has_more_data = False
    has_prev_data = False
    if paginate:
        if reversed_order:
            # Flip the sort direction when walking backwards through pages.
            sort_dir = 'desc' if sort_dir == 'asc' else 'asc'
        page_size = utils.get_page_size(request)
        # One extra row lets update_pagination detect a following page.
        flavors = novaclient(request).flavors.list(is_public=is_public,
                                                   marker=marker,
                                                   limit=page_size + 1,
                                                   sort_key=sort_key,
                                                   sort_dir=sort_dir)
        flavors, has_more_data, has_prev_data = update_pagination(
            flavors, page_size, marker, reversed_order)
    else:
        flavors = novaclient(request).flavors.list(is_public=is_public)
    if get_extras:
        for flavor in flavors:
            flavor.extras = flavor_get_extras(request, flavor.id, True,
                                              flavor)
    return (flavors, has_more_data, has_prev_data)
def get_tenants(request, paginate=False, marker=None, domain=None, user=None,
                admin=True, filters=None):
    """List keystone projects via an admin client.

    Returns (projects, has_more). Note: limit/marker pagination only
    applies on the keystone v2 path; the v3 path filters by domain/user.
    """
    ksclient = get_admin_ksclient()
    page_size = utils.get_page_size(request)
    limit = None
    if paginate:
        # One extra row lets us detect whether a following page exists.
        limit = page_size + 1
    has_more = False
    # if requesting the projects for the current user,
    # return the list from the cache
    if user == request.user.id:
        projects = request.user.authorized_tenants
    elif keystone.VERSIONS.active < 3:
        projects = ksclient.tenants.list(limit, marker)
        if paginate and len(projects) > page_size:
            projects.pop(-1)
            has_more = True
    else:
        kwargs = {"domain": domain, "user": user}
        if filters is not None:
            kwargs.update(filters)
        projects = ksclient.projects.list(**kwargs)
    return (projects, has_more)
def filter_syslog_list(request, marker=None, paginate=False, opt=None):
    """List filtered syslog rows from the DB.

    Returns (syslogs, has_more_data, count). The has_prev_data logic is
    intentionally disabled (commented out / `pass` bodies), so only the
    "more" flag is ever reported.
    """
    limit = 500
    page_size = utils.get_page_size(request)
    if paginate:
        # One extra row lets us detect whether a following page exists.
        request_size = page_size + 1
    else:
        request_size = limit
    syslogs, count = get_filter_syslogs_from_db(limit=request_size,
                                                marker=marker, opt=opt)
    # has_prev_data = False
    has_more_data = False
    if paginate:
        # images = list(itertools.islice(images_iter, request_size))
        # first and middle page condition
        if len(syslogs) > page_size:
            syslogs.pop(-1)
            has_more_data = True
            # middle page condition
            if marker is not None:
                pass
                # has_prev_data = True
        # last page condition
        elif marker is not None:
            pass
            # has_prev_data = True
    return (syslogs, has_more_data, count)
def project_list(request, paginate=False, marker=None, domain=None, user=None,
                 admin=True, filters=None):
    """List keystone projects; returns (projects, has_more_data).

    Pagination (limit/marker) only applies on the keystone v2 path.
    """
    keystoneclient = get_keystone_client()
    page_size = utils.get_page_size(request)
    limit = page_size + 1 if paginate else None
    has_more_data = False
    if user == request.user.id:
        # Projects for the current user come straight from the cached token.
        projects = request.user.authorized_tenants
    elif VERSIONS.active < 3:
        projects = keystoneclient.projects.list(limit, marker)
        if paginate and len(projects) > page_size:
            projects.pop(-1)
            has_more_data = True
    else:
        kwargs = {"domain": domain, "user": user}
        if filters is not None:
            kwargs.update(filters)
        projects = keystoneclient.projects.list(**kwargs)
    return (projects, has_more_data)
def get_data(self): table = self.get_table() # urbane = UrbaneClient( # auth_url=ADMIN_AUTH_URL, # username=ADMIN_USERNAME, # password=ADMIN_PASSWORD, # # urbane client automatically discovers # # Keystone API version from auth_url # # and takes required parameter # tenant=ADMIN_TENANT, # domain=ADMIN_DOMAIN, # region=ADMIN_REGION # ) urbane = get_urbaneclient() table._page_ = int( self.request.GET.get( signups_tables.SignupsTable._meta.pagination_param, 1)) if table._page_ < 1: table._page_ = 1 page_size = utils.get_page_size(self.request) page_range = '%d:%d' % (table._page_, page_size) signups, total = urbane.list(range=page_range) # handle _has_more_data_ self._has_more_data_ = (total // page_size) + 1 > table._page_ return signups
def pagination_list(entity, request, marker='', sort_keys='', sort_dirs='asc',
                    paginate=False, reversed_order=False):
    """Retrieve a listing of specific entity and handles pagination.

    :param entity: Requested entity (String)
    :param request: Request data
    :param marker: Pagination marker for large data sets: entity id
    :param sort_keys: Columns to sort results by
    :param sort_dirs: Sorting Directions (asc/desc). Default:asc
    :param paginate: If true will perform pagination based on settings.
                     Default:False
    :param reversed_order: flag to reverse list. Default:False
    """
    limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)
    if paginate:
        # One extra row lets us detect whether a following page exists.
        request_size = page_size + 1
    else:
        request_size = limit
    if reversed_order:
        # Flip the sort direction when walking backwards through pages.
        sort_dirs = 'desc' if sort_dirs == 'asc' else 'asc'
    api = mistralclient(request)
    entities_iter = getattr(api, entity).list(
        marker, limit, sort_keys, sort_dirs
    )
    has_prev_data = has_more_data = False
    if paginate:
        entities = list(itertools.islice(entities_iter, request_size))
        # first and middle page condition
        if len(entities) > page_size:
            entities.pop(-1)
            has_more_data = True
            # middle page condition
            if marker is not None:
                has_prev_data = True
        # first page condition when reached via prev back
        elif reversed_order and marker is not None:
            has_more_data = True
        # last page condition
        elif marker is not None:
            has_prev_data = True
        # restore the original ordering here
        if reversed_order:
            entities = sorted(entities,
                              key=lambda ent: (getattr(ent, sort_keys) or
                                               '').lower(),
                              reverse=(sort_dirs == 'desc')
                              )
    else:
        entities = list(entities_iter)
    return entities, has_more_data, has_prev_data
def test_bad_cookie_value(self):
    """A cookie that cannot be parsed as an int falls back to the default."""
    req = self.factory.get('/project/instances/')
    # Ensure the session cannot satisfy the lookup first.
    if 'horizon_pagesize' in req.session:
        del req.session['horizon_pagesize']
    req.COOKIES['horizon_pagesize'] = 'not int-able'
    fallback = 30
    self.assertEqual(functions.get_page_size(req, fallback), fallback)
def test_bad_cookie_value(self):
    # With no session value present, a cookie that cannot be coerced to
    # int must make get_page_size() return the supplied default.
    requested_url = "/project/instances/"
    request = self.factory.get(requested_url)
    if "horizon_pagesize" in request.session:
        del request.session["horizon_pagesize"]
    request.COOKIES["horizon_pagesize"] = "not int-able"
    default = 30
    self.assertEqual(functions.get_page_size(request, default), default)
def catalog_list_detailed(request, marker=None, limit=None,
                          sort_key='catalog_id', sort_dir='desc',
                          force_show_deleted=None, filters=None,
                          paginate=False):
    """List catalogs with optional filtering and pagination.

    Returns (catalogs, has_prev_data, has_more_data) — note the prev/more
    order differs from most siblings in this file.
    """
    limit = limit or getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)
    if paginate:
        # One extra row lets us detect whether a following page exists.
        request_size = page_size + 1
    else:
        request_size = limit
    kwargs = {'limit': limit,
              'sort_dir': sort_dir,
              'sort_key': sort_key,
              }
    if marker is not None:
        kwargs['marker'] = marker
    if force_show_deleted is not None:
        kwargs['force_show_deleted'] = force_show_deleted
    if filters is not None:
        kwargs.update(filters)
    # NOTE(review): kwargs is passed positionally as one dict, not as
    # **kwargs — presumably the aflo client accepts a dict of list
    # options; confirm against the client signature.
    catalog_list = afloclient(request).catalogs.list(kwargs)
    has_prev_data = False
    has_more_data = False
    if paginate:
        catalogs = list(itertools.islice(catalog_list, request_size))
        if sort_dir == 'desc':
            # Forward paging: overflow row means a next page exists.
            if len(catalogs) > page_size:
                catalogs.pop(-1)
                has_more_data = True
            else:
                has_more_data = False
            if marker is not None:
                has_prev_data = True
        else:
            # Backward paging ('asc'): overflow row means a previous
            # page exists; restore display order afterwards.
            if len(catalogs) > page_size:
                catalogs.pop(-1)
                has_prev_data = True
            else:
                has_prev_data = False
                has_more_data = True
            catalogs.reverse()
    else:
        catalogs = list(catalog_list)
    return (catalogs, has_prev_data, has_more_data)
def server_list_paged(request, search_opts=None, detailed=True,
                      sort_dir="desc"):
    """List nova servers with pagination that survives marker deletion.

    If the session flags the previous page's marker server as deleted,
    re-issues the listing and, when it comes back empty, retries from the
    head (desc) and then the tail (asc) of the result set.
    Returns (servers, has_more_data, has_prev_data).
    """
    has_more_data = False
    has_prev_data = False
    nova_client = get_novaclient_with_locked_status(request)
    page_size = utils.get_page_size(request)
    search_opts = {} if search_opts is None else search_opts
    marker = search_opts.get('marker', None)
    if not search_opts.get('all_tenants', False):
        search_opts['project_id'] = request.user.tenant_id
    if search_opts.pop('paginate', False):
        reversed_order = sort_dir == "asc"
        LOG.debug("Notify received on deleted server: %r",
                  ('server_deleted' in request.session))
        deleted = request.session.pop('server_deleted', None)
        # The marker we were given may point at a server deleted since
        # the previous page was rendered.
        view_marker = 'possibly_deleted' if deleted and marker else 'ok'
        search_opts['marker'] = deleted if deleted else marker
        search_opts['limit'] = page_size + 1
        search_opts['sort_dir'] = sort_dir
        servers = [Server(s, request)
                   for s in nova_client.servers.list(detailed, search_opts)]
        if view_marker == 'possibly_deleted':
            # Empty page: retry from the head of the list (descending).
            if not servers:
                view_marker = 'head_deleted'
                search_opts['sort_dir'] = 'desc'
                reversed_order = False
                servers = [Server(s, request)
                           for s in nova_client.servers.list(detailed,
                                                             search_opts)]
            # Still empty: retry from the tail (ascending).
            if not servers:
                view_marker = 'tail_deleted'
                search_opts['sort_dir'] = 'asc'
                reversed_order = True
                servers = [Server(s, request)
                           for s in nova_client.servers.list(detailed,
                                                             search_opts)]
        (servers, has_more_data, has_prev_data) = update_pagination(
            servers, page_size, marker, reversed_order)
        # At the head there is no previous page; at the tail no next page.
        has_prev_data = (False
                         if view_marker == 'head_deleted'
                         else has_prev_data)
        has_more_data = (False
                         if view_marker == 'tail_deleted'
                         else has_more_data)
    else:
        servers = [Server(s, request)
                   for s in nova_client.servers.list(detailed, search_opts)]
    return (servers, has_more_data, has_prev_data)
def get_data(self):
    """Return one admin page of neutron networks with tenant/agent info.

    Supports forward and reverse paging via the table's pagination
    parameters; sets self._prev and self._more for the table controls.
    """
    self._prev = self._more = False
    prev_marker = self.request.GET.get(
        networks_tables.NetworksTable._meta.prev_pagination_param)
    if prev_marker is not None:
        page_reverse = True
        marker = prev_marker
    else:
        page_reverse = False
        marker = self.request.GET.get(
            networks_tables.NetworksTable._meta.pagination_param)
    # NOTE(review): the filters dict from get_filters replaces search_opts
    # entirely here.
    search_opts = self.get_filters({})
    search_opts['retrieve_all'] = False
    search_opts['page_reverse'] = page_reverse
    page_size = utils.get_page_size(self.request) or getattr(
        settings, 'API_RESULT_LIMIT', 1000)
    # One extra row lets us detect whether a following page exists.
    search_opts['limit'] = page_size + 1
    search_opts['marker'] = marker or ''
    try:
        networks = api.neutron.network_list_admin(self.request,
                                                  **search_opts)
    except Exception:
        networks = []
        msg = _('Network list can not be retrieved.')
        exceptions.handle(self.request, msg)
    if networks:
        self.exception = False
        tenant_dict = self._get_tenant_list()
        self._prev = False
        self._more = False
        if len(networks) > page_size:
            # Drop the probe row: first row when paging backwards,
            # last row when paging forwards.
            if page_reverse:
                networks = networks[1:len(networks)]
            else:
                networks.pop()
            self._more = True
            if marker is not None:
                self._prev = True
        elif page_reverse and marker is not None:
            self._more = True
        elif marker is not None:
            self._prev = True
        for n in networks:
            # Set tenant name
            tenant = tenant_dict.get(n.tenant_id, None)
            n.tenant_name = getattr(tenant, 'name', None)
            n.num_agents = self._get_agents_data(n.id)
        # self.exception is presumably set by _get_agents_data on
        # failure — report it once for the whole page.
        if self.exception:
            msg = _('Unable to list dhcp agents hosting network.')
            exceptions.handle(self.request, msg)
    return networks
def _populate_request_size_and_page_size(request, paginate=False):
    """Return (page_size, request_size) for a listing call.

    When paginating, request_size is one more than the page size so a
    following page can be detected; otherwise it is the configured
    API result limit.
    """
    limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)
    request_size = page_size + 1 if paginate else limit
    return page_size, request_size
def get_initial(self):
    """Initial form values: language, timezone and page-size preferences.

    Each value is looked up in the session first, then the cookies, then
    a final fallback (browser language code / 'UTC').
    """
    session = self.request.session
    cookies = self.request.COOKIES
    language = session.get(
        settings.LANGUAGE_COOKIE_NAME,
        cookies.get(settings.LANGUAGE_COOKIE_NAME,
                    self.request.LANGUAGE_CODE))
    timezone = session.get('django_timezone',
                           cookies.get('django_timezone', 'UTC'))
    return {'language': language,
            'timezone': timezone,
            'pagesize': utils.get_page_size(self.request)}
def get_context_data(self, **kwargs):
    """Build the alarm-definition list context with offset pagination.

    Maintains a stack of previously-visited page offsets in the session
    so the "previous page" link can be derived.
    """
    if not policy.check((('monitoring', 'monitoring:monitoring'), ),
                        self.request):
        raise exceptions.NotAuthorized()
    context = super(IndexView, self).get_context_data(**kwargs)
    num_results = 0
    contacts = []
    prev_page_stack = []
    page_offset = self.request.GET.get('page_offset')
    if 'prev_page_stack' in self.request.session:
        prev_page_stack = self.request.session['prev_page_stack']
    if page_offset is None:
        # Fresh entry to the first page: reset the history stack.
        page_offset = 0
        prev_page_stack = []
    else:
        page_offset = int(page_offset)
    limit = utils.get_page_size(self.request)
    try:
        # To judge whether there is next page, get limit + 1
        results = api.monitor.alarmdef_list(self.request, page_offset,
                                            limit + 1)
        num_results = len(results)
        paginator = Paginator(results, limit)
        contacts = paginator.page(1)
    except EmptyPage:
        contacts = paginator.page(paginator.num_pages)
    except Exception:
        messages.error(self.request,
                       _("Could not retrieve alarm definitions"))
        return context
    context["contacts"] = contacts
    # Fewer than limit+1 rows means this is the last page.
    if num_results < limit + 1:
        context["page_offset"] = None
    else:
        context["page_offset"] = page_offset + limit
    if page_offset in prev_page_stack:
        # Revisiting an offset: truncate the history above it.
        index = prev_page_stack.index(page_offset)
        prev_page_stack = prev_page_stack[0:index]
    prev_page_offset = prev_page_stack[-1] if prev_page_stack else None
    if prev_page_offset is not None:
        context["prev_page_offset"] = prev_page_offset
    # Bound the history stack so the session entry cannot grow forever.
    if len(prev_page_stack) > PREV_PAGE_LIMIT:
        del prev_page_stack[0]
    prev_page_stack.append(page_offset)
    self.request.session['prev_page_stack'] = prev_page_stack
    return context
def get_volumes_data(self):
    """Return one page of volumes across all tenants (admin view).

    Uses marker-based pagination: requests ``page_size + 1`` rows so
    the extra row signals that another page exists, records the
    has-more/has-prev state on the table via ``self._more`` /
    ``self._prev``, then annotates each volume with attachment,
    snapshot and project-name information.
    """
    volumes = []
    search_opts = {'all_tenants': True}
    prev_marker = self.request.GET.get(
        volumes_tables.VolumesTable._meta.prev_pagination_param)
    if prev_marker is not None:
        # Walking backwards: flip the sort order and page from the
        # previous-page marker instead of the forward marker.
        sort_dir = 'asc'
        marker = prev_marker
    else:
        sort_dir = 'desc'
        marker = self.request.GET.get(
            volumes_tables.VolumesTable._meta.pagination_param)
    search_opts['sort_dir']=sort_dir
    search_opts['marker']=marker
    # Fall back to the settings limit when the per-user page size is 0.
    page_size = utils.get_page_size(self.request) or getattr(
        settings, 'API_RESULT_LIMIT', 1000)
    # One extra row to detect whether a further page exists.
    search_opts['limit']=page_size + 1
    volumes = self._get_volumes(search_opts)
    self._prev = False
    self._more = False
    if len(volumes) > page_size:
        # Extra row present: drop it and flag that more data follows.
        volumes.pop()
        self._more = True
        if marker is not None:
            self._prev = True
    elif sort_dir == 'asc' and marker is not None:
        # First page reached by paging backwards: more data lies ahead.
        self._more = True
    elif marker is not None:
        # Last page: nothing further, but a previous page exists.
        self._prev = True
    if prev_marker is not None:
        # Backward queries come back in ascending order; restore the
        # table's newest-first ordering.
        volumes = sorted(volumes, key=attrgetter('created_at'),
                         reverse=True)
    instances = self._get_instances(search_opts={'all_tenants': True})
    volume_ids_with_snapshots = self._get_volumes_ids_with_snapshots(
        search_opts={'all_tenants': True})
    self._set_volume_attributes(
        volumes, instances, volume_ids_with_snapshots)
    # Gather our tenants to correlate against IDs
    try:
        tenants, has_more = keystone.tenant_list(self.request)
    except Exception:
        # Best-effort: show volumes without project names.
        tenants = []
        msg = _('Unable to retrieve volume project information.')
        exceptions.handle(self.request, msg)
    tenant_dict = SortedDict([(t.id, t) for t in tenants])
    for volume in volumes:
        tenant_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None)
        tenant = tenant_dict.get(tenant_id, None)
        volume.tenant_name = getattr(tenant, "name", None)
    return volumes
def _get_routers(self, search_opts=None):
    """Fetch one page of routers for the admin routers table.

    Neutron-style pagination: requests ``page_size + 1`` rows (with
    ``page_reverse`` set when walking backwards so Neutron returns
    the previous page) and records has-more/has-prev state on
    ``self._more`` / ``self._prev``. Each router is then decorated
    with its tenant name and external network name.
    """
    self._prev = self._more = False
    prev_marker = self.request.GET.get(
        rtbl.RoutersTable._meta.prev_pagination_param)
    if prev_marker is not None:
        # Walking backwards: let Neutron reverse the page for us.
        page_reverse = True
        marker = prev_marker
    else:
        page_reverse = False
        marker = self.request.GET.get(
            rtbl.RoutersTable._meta.pagination_param)
    # NOTE(review): the search_opts parameter is overwritten here, so
    # only the request's filter parameters are honoured — confirm
    # callers never rely on passing search_opts in.
    search_opts = self.get_filters({})
    search_opts['retrieve_all'] = False
    search_opts['page_reverse'] = page_reverse
    # Fall back to the settings limit when the per-user page size is 0.
    page_size = utils.get_page_size(self.request) or getattr(
        settings, 'API_RESULT_LIMIT', 1000)
    # One extra row to detect whether a further page exists.
    search_opts['limit'] = page_size + 1
    search_opts['marker'] = marker or ''
    try:
        routers = api.neutron.router_list_admin(self.request,
                                                **search_opts)
    except Exception:
        # Best-effort: show an empty table with an error message.
        routers = []
        exceptions.handle(self.request,
                          _('Unable to retrieve router list.'))
    if routers:
        tenant_dict = self._get_tenant_list()
        ext_net_dict = self._list_external_networks()
        self._prev = False
        self._more = False
        if len(routers) > page_size:
            if page_reverse:
                # Reversed pages carry the extra row at the front.
                routers = routers[1:len(routers)]
            else:
                routers.pop()
            self._more = True
            if marker is not None:
                self._prev = True
        elif page_reverse and marker is not None:
            # First page reached by paging backwards: more data ahead.
            self._more = True
        elif marker is not None:
            # Last page: a previous page exists but nothing further.
            self._prev = True
        for r in routers:
            # Set tenant name
            tenant = tenant_dict.get(r.tenant_id, None)
            r.tenant_name = getattr(tenant, 'name', None)
            # If name is empty use UUID as name
            r.name = r.name_or_id
            # Set external network name
            self._set_external_network(r, ext_net_dict)
    return routers
def get_initial(self):
    """Seed the user-settings form with the current preferences.

    Language, timezone, page size and instance-log length are resolved
    session-first, then cookie, then a final default (the request's
    language code for language; settings.TIME_ZONE or 'UTC' for
    timezone).
    """
    request = self.request
    session = request.session
    cookies = request.COOKIES
    lang_key = settings.LANGUAGE_COOKIE_NAME
    fallback_tz = getattr(settings, 'TIME_ZONE', 'UTC')
    initial = {}
    initial['language'] = session.get(
        lang_key, cookies.get(lang_key, request.LANGUAGE_CODE))
    initial['timezone'] = session.get(
        'django_timezone', cookies.get('django_timezone', fallback_tz))
    initial['pagesize'] = utils.get_page_size(request)
    initial['instance_log_length'] = utils.get_log_length(request)
    return initial
def get_data(self):
    """Return one page of Glance images marked for the catalog.

    Marker-based pagination over a lazily filtered image stream:
    only ``page_size + 1`` filtered images are pulled from the
    iterator (via ``islice``) so the extra row signals another page.
    Has-more/has-prev state is recorded on ``self._more`` /
    ``self._prev``.
    """
    prev_marker = self.request.GET.get(
        tables.MarkedImagesTable._meta.prev_pagination_param, None)
    if prev_marker is not None:
        # Walking backwards: flip the sort order and page from the
        # previous-page marker.
        sort_dir = 'asc'
        marker = prev_marker
    else:
        sort_dir = 'desc'
        marker = self.request.GET.get(
            tables.MarkedImagesTable._meta.pagination_param, None)
    page_size = utils.get_page_size(self.request)
    # One extra row to detect whether a further page exists.
    request_size = page_size + 1
    kwargs = {'filters': {}}
    if marker:
        kwargs['marker'] = marker
    kwargs['sort_dir'] = sort_dir
    images = []
    self._prev = False
    self._more = False
    glance_v2_client = glance.glanceclient(self.request, "2")
    try:
        images_iter = glance_v2_client.images.list(**kwargs)
    except Exception:
        # exceptions.handle with redirect aborts the request here.
        msg = _('Unable to retrieve list of images')
        uri = reverse('horizon:app-catalog:catalog:index')
        exceptions.handle(self.request, msg, redirect=uri)
    marked_images_iter = forms.filter_murano_images(images_iter,
                                                    request=self.request)
    # Pull at most request_size filtered images without materializing
    # the whole Glance listing.
    images = list(itertools.islice(marked_images_iter, request_size))
    # first and middle page condition
    if len(images) > page_size:
        images.pop(-1)
        self._more = True
        # middle page condition
        if marker is not None:
            self._prev = True
    # first page condition when reached via prev back
    elif sort_dir == 'asc' and marker is not None:
        self._more = True
    # last page condition
    elif marker is not None:
        self._prev = True
    if prev_marker is not None:
        # Backward queries come back in ascending order; restore the
        # table's newest-first ordering.
        images.reverse()
    return images
def notification_list(request, filters=None, marker='', paginate=False):
    """Return notifications plus pagination flags.

    Returns a ``(entities, has_more_data, has_prev_data)`` tuple.
    The two flags are only computed (by ``pagination_process``) when
    ``paginate`` is true; otherwise the full iterator is drained and
    both flags are False.
    """
    page_size = utils.get_page_size(request)
    kwargs = get_request_param(marker, paginate, filters, page_size)
    entities_iter = openstack_connection(request).notifications(**kwargs)
    if paginate:
        entities, has_more_data, has_prev_data = pagination_process(
            entities_iter, kwargs['limit'], page_size, marker)
    else:
        entities = list(entities_iter)
        has_more_data = has_prev_data = False
    return entities, has_more_data, has_prev_data
def volume_list_paged(request, search_opts=None, marker=None, paginate=False,
                      sort_dir="desc"):
    """To see all volumes in the cloud as an admin you can pass in a special
    search option: {'all_tenants': 1}

    Returns ``(volumes, has_more_data, has_prev_data)``. Pagination is
    marker-based and only available on cinder API v2+: ``page_size + 1``
    rows are requested so the extra row signals a further page. Each
    volume is annotated with its transfer record (or None).
    """
    has_more_data = False
    has_prev_data = False
    volumes = []
    c_client = cinderclient(request)
    if c_client is None:
        # No cinder endpoint available: return an empty result.
        return volumes, has_more_data, has_prev_data
    # build a dictionary of volume_id -> transfer
    transfers = {
        t.volume_id: t
        for t in transfer_list(request, search_opts=search_opts)
    }
    if VERSIONS.active > 1 and paginate:
        page_size = utils.get_page_size(request)
        # sort_key and sort_dir deprecated in kilo, use sort
        # if pagination is true, we use a single sort parameter
        # by default, it is "created_at"
        sort = 'created_at:' + sort_dir
        for v in c_client.volumes.list(search_opts=search_opts,
                                       limit=page_size + 1,
                                       marker=marker,
                                       sort=sort):
            v.transfer = transfers.get(v.id)
            volumes.append(Volume(v))
        if len(volumes) > page_size:
            # Extra row present: drop it and flag more data ahead.
            has_more_data = True
            volumes.pop()
            if marker is not None:
                has_prev_data = True
        # first page condition when reached via prev back
        elif sort_dir == 'asc' and marker is not None:
            has_more_data = True
        # last page condition
        elif marker is not None:
            has_prev_data = True
    else:
        # Unpaginated path: return everything the API gives us.
        for v in c_client.volumes.list(search_opts=search_opts):
            v.transfer = transfers.get(v.id)
            volumes.append(Volume(v))
    return volumes, has_more_data, has_prev_data