def geolocate_post(request):
    data = request.validated
    session = request.db_slave_session
    result = None
    api_key = request.GET.get('key', None)
    if api_key is not None:
        if data['wifiAccessPoints']:
            result = search_wifi_ap(session, data)
        else:
            result = search_cell_tower(session, data)
    if result is None:
        result = HTTPNotFound()
        result.content_type = 'application/json'
        result.body = NOT_FOUND
        return result
    return {
        "location": {
            "lat": result['lat'],
            "lng": result['lon'],
        },
        "accuracy": float(result['accuracy']),
    }

def geolocate_view(request):
    data, errors = preprocess_request(
        request,
        schema=GeoLocateSchema(),
        extra_checks=(geolocate_validator, ),
        response=JSONParseError,
        accept_empty=True,
    )
    data = map_data(data)
    session = request.db_slave_session
    result = search_all_sources(
        session, 'geolocate', data,
        client_addr=request.client_addr,
        geoip_db=request.registry.geoip_db,
        api_key_log=getattr(request, 'api_key_log', False),
        api_key_name=getattr(request, 'api_key_name', None))
    if not result:
        result = HTTPNotFound()
        result.content_type = 'application/json'
        result.body = NOT_FOUND
        return result
    return {
        "location": {
            "lat": result['lat'],
            "lng": result['lon'],
        },
        "accuracy": float(result['accuracy']),
    }

def JsonHTTPNotFound(message=None):
    response = HTTPNotFound()
    response.content_type = 'application/json'
    if message:
        # Serialize dicts to JSON; pass strings through unchanged.
        # (The original left msg unbound for non-dict messages.)
        msg = json.dumps(message) if isinstance(message, dict) else message
        response.text = unicode(msg)
    return response

def __call__(self, environ, start_response):
    not_found = HTTPNotFound()
    accept = environ.get('HTTP_ACCEPT', '')
    if accept and 'json' in accept:
        not_found.body = format_error_response_to_json(not_found)
        not_found.content_type = 'application/json'
    return not_found(environ, start_response)

def geolocate_view(request):
    heka_client = get_heka_client()

    data, errors = preprocess_request(
        request,
        schema=GeoLocateSchema(),
        extra_checks=(geolocate_validator, ),
        response=JSONError,
        accept_empty=True,
    )

    session = request.db_slave_session
    result = None

    if data and data['wifiAccessPoints']:
        result = search_wifi_ap(session, data)
        if result is not None:
            heka_client.incr('geolocate.wifi_hit')
            heka_client.timer_send('geolocate.accuracy.wifi',
                                   result['accuracy'])
    elif data:
        result = search_cell_tower(session, data)
        if result is not None:
            heka_client.incr('geolocate.cell_hit')
            heka_client.timer_send('geolocate.accuracy.cell',
                                   result['accuracy'])
        if result is None:
            result = search_cell_tower_lac(session, data)
            if result is not None:
                heka_client.incr('geolocate.cell_lac_hit')
                heka_client.timer_send('geolocate.accuracy.cell_lac',
                                       result['accuracy'])

    if result is None and request.client_addr:
        result = search_geoip(request.registry.geoip_db,
                              request.client_addr)
        if result is not None:
            heka_client.incr('geolocate.geoip_hit')
            heka_client.timer_send('geolocate.accuracy.geoip',
                                   result['accuracy'])

    if result is None:
        heka_client.incr('geolocate.miss')
        result = HTTPNotFound()
        result.content_type = 'application/json'
        result.body = NOT_FOUND
        return result

    return {
        "location": {
            "lat": result['lat'],
            "lng": result['lon'],
        },
        "accuracy": float(result['accuracy']),
    }

def process_single(request):
    stats_client = get_stats_client()
    locate_data, locate_errors = preprocess_request(
        request,
        schema=GeoLocateSchema(),
        extra_checks=(geolocate_validator, ),
        response=JSONParseError,
        accept_empty=True,
    )

    data, errors = preprocess_request(
        request,
        schema=GeoSubmitSchema(),
        extra_checks=(geosubmit_validator,),
        response=None,
    )
    data = {'items': [data]}

    nickname = request.headers.get('X-Nickname', u'')
    email = request.headers.get('X-Email', u'')
    upload_items = flatten_items(data)
    errors = process_upload(nickname, email, upload_items)

    if errors is not SENTINEL and errors:  # pragma: no cover
        stats_client.incr('geosubmit.upload.errors', len(errors))

    first_item = data['items'][0]
    if first_item['latitude'] == -255 or first_item['longitude'] == -255:
        data = map_data(data['items'][0])
        session = request.db_slave_session
        result = search_all_sources(
            session, 'geosubmit', data,
            client_addr=request.client_addr,
            geoip_db=request.registry.geoip_db,
            api_key_log=getattr(request, 'api_key_log', False),
            api_key_name=getattr(request, 'api_key_name', None))
    else:
        result = {'lat': first_item['latitude'],
                  'lon': first_item['longitude'],
                  'accuracy': first_item['accuracy']}

    if result is None:
        stats_client.incr('geosubmit.miss')
        result = HTTPNotFound()
        result.content_type = 'application/json'
        result.body = NOT_FOUND
        return result

    return {
        "location": {
            "lat": result['lat'],
            "lng": result['lon'],
        },
        "accuracy": float(result['accuracy']),
    }

def save_idea(request):
    """Update this idea.

    In case the ``parentId`` is changed, handle all
    ``IdeaLink`` changes and send relevant ideas on the socket."""
    discussion_id = int(request.matchdict['discussion_id'])
    idea_id = request.matchdict['id']
    idea_data = json.loads(request.body)
    # Idea.default_db.execute('set transaction isolation level read committed')
    # Special items in TOC, like unsorted posts.
    if idea_id in ['orphan_posts']:
        return {'ok': False, 'id': Idea.uri_generic(idea_id)}

    idea = Idea.get_instance(idea_id)
    if not idea:
        raise HTTPNotFound("No such idea: %s" % (idea_id))
    if isinstance(idea, RootIdea):
        raise HTTPBadRequest("Cannot edit root idea.")
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound("Discussion with id '%s' not found." % discussion_id)
    if idea.discussion_id != discussion.id:
        raise HTTPBadRequest(
            "Idea from discussion %s cannot be saved from a different "
            "discussion (%s)." % (idea.discussion_id, discussion.id))

    simple_fields = {
        'shortTitle': 'short_title',
        'longTitle': 'long_title',
        'definition': 'definition',
        'message_view_override': 'message_view_override',
        'messages_in_parent': 'messages_in_parent',
    }
    for key, attr_name in simple_fields.iteritems():
        if key in idea_data:
            setattr(idea, attr_name, idea_data[key])

    if 'parentId' in idea_data and idea_data['parentId'] is not None:
        # TODO: Make sure this is sent as a list!
        parent = Idea.get_instance(idea_data['parentId'])
        if not parent:
            raise HTTPNotFound("Missing parentId %s" % (idea_data['parentId']))

        # calculate it early to maximize contention.
        prev_ancestors = parent.get_all_ancestors()
        new_ancestors = set()

        order = idea_data.get('order', 0.0)

        for parent_link in idea.source_links:
            # still assuming there's only one.
            pl_parent = parent_link.source
            pl_ancestors = pl_parent.get_all_ancestors()
            new_ancestors.update(pl_ancestors)
            if parent_link.source != parent:
                parent_link.copy(True)
                parent_link.source = parent
                parent.db.expire(parent, ['target_links'])
                parent.db.expire(pl_parent, ['target_links'])
                for ancestor in pl_ancestors:
                    if ancestor in prev_ancestors:
                        break
                    ancestor.send_to_changes()
                for ancestor in prev_ancestors:
                    if ancestor in new_ancestors:
                        break
                    ancestor.send_to_changes()
            parent_link.order = order
            parent_link.db.expire(parent_link.source, ['target_links'])
            parent_link.source.send_to_changes()
            parent_link.db.flush()

    idea.send_to_changes()

    return {'ok': True, 'id': idea.uri()}

def blog_view(request):
    blog_id = int(request.matchdict.get('id', -1))
    entry = BlogRecordService.by_id(blog_id, request)
    if not entry:
        return HTTPNotFound()
    return {'entry': entry}

def notfound(request):
    return HTTPNotFound()

def get_manifest(request):
    try:
        return request.services["fetch_document_manifest"](
            id=request.matchdict["document_id"])
    except exceptions.DoesNotExist as exc:
        raise HTTPNotFound(exc)

def handle_request(self, request):
    attrs = request.__dict__
    registry = attrs['registry']
    request.request_iface = IRequest
    context = None
    routes_mapper = self.routes_mapper
    debug_routematch = self.debug_routematch
    adapters = registry.adapters
    has_listeners = registry.has_listeners
    notify = registry.notify
    logger = self.logger

    has_listeners and notify(NewRequest(request))

    # find the root object
    root_factory = self.root_factory
    if routes_mapper is not None:
        info = routes_mapper(request)
        match, route = info['match'], info['route']
        if route is None:
            if debug_routematch:
                msg = ('no route matched for url %s' % request.url)
                logger and logger.debug(msg)
        else:
            attrs['matchdict'] = match
            attrs['matched_route'] = route

            if debug_routematch:
                msg = ('route matched for url %s; '
                       'route_name: %r, '
                       'path_info: %r, '
                       'pattern: %r, '
                       'matchdict: %r, '
                       'predicates: %r' % (
                           request.url,
                           route.name,
                           request.path_info,
                           route.pattern,
                           match,
                           ', '.join([p.text() for p in route.predicates])))
                logger and logger.debug(msg)

            request.request_iface = registry.queryUtility(
                IRouteRequest,
                name=route.name,
                default=IRequest)

            root_factory = route.factory or self.root_factory

    root = root_factory(request)
    attrs['root'] = root

    # find a context
    traverser = adapters.queryAdapter(root, ITraverser)
    if traverser is None:
        traverser = ResourceTreeTraverser(root)
    tdict = traverser(request)

    context, view_name, subpath, traversed, vroot, vroot_path = (
        tdict['context'],
        tdict['view_name'],
        tdict['subpath'],
        tdict['traversed'],
        tdict['virtual_root'],
        tdict['virtual_root_path'])

    attrs.update(tdict)
    has_listeners and notify(ContextFound(request))

    # find a view callable
    context_iface = providedBy(context)
    view_callable = adapters.lookup(
        (IViewClassifier, request.request_iface, context_iface),
        IView, name=view_name, default=None)

    # invoke the view callable
    if view_callable is None:
        if self.debug_notfound:
            msg = ('debug_notfound of url %s; path_info: %r, '
                   'context: %r, view_name: %r, subpath: %r, '
                   'traversed: %r, root: %r, vroot: %r, '
                   'vroot_path: %r' % (
                       request.url, request.path_info, context,
                       view_name, subpath, traversed, root, vroot,
                       vroot_path))
            logger and logger.debug(msg)
        else:
            msg = request.path_info
        raise HTTPNotFound(msg)
    else:
        try:
            response = view_callable(context, request)
        except PredicateMismatch:
            # look for other views that meet the predicate criteria
            for iface in context_iface.__sro__[1:]:
                previous_view_callable = view_callable
                view_callable = adapters.lookup(
                    (IViewClassifier, request.request_iface, iface),
                    IView, name=view_name, default=None)
                # intermediate bases may lookup same view_callable
                if view_callable is previous_view_callable:
                    continue
                if view_callable is not None:
                    try:
                        response = view_callable(context, request)
                        break
                    except PredicateMismatch:
                        pass
            else:
                raise

    return response

def _get_external_sheet(self):
    external = self.propsheets.get('external', {})
    if external.get('service') == 's3':
        return external
    else:
        raise HTTPNotFound()

def get_item(self, id):
    self.item = self.get_model_item(id)
    self.main_template = 'item.mako'
    if self.item is None:
        raise HTTPNotFound()
    return {'item': self.item}

def admin_graph(request):
    graph_name = request.matchdict['name']

    try:
        import pygal
    except ImportError:
        raise HTTPNotFound()

    def get(name, default=None, type=str):
        try:
            return type(request.GET.get(name, default))
        except ValueError:
            raise HTTPBadRequest()

    pygalopts = {
        'js': [
            request.static_url('ccvpn:static/pygal/svg.jquery.js'),
            request.static_url('ccvpn:static/pygal/pygal-tooltips.js')
        ]
    }

    period = get('period', 'm')
    if period == 'm':
        period_time = timedelta(days=30)
    elif period == 'y':
        period_time = timedelta(days=365)
    else:
        # Any other value would leave period_time unbound below.
        raise HTTPBadRequest()

    if graph_name == 'users':
        chart = pygal.Line(fill=True, x_label_rotation=75,
                           show_legend=False, **pygalopts)
        chart.title = 'Users (%s)' % period
        chart.x_labels = []
        values = []
        gen = last_days(30) if period == 'm' else last_months(12)
        users = DBSession.query(User).all()
        for m in gen:
            filter_ = time_filter_future(period, m, lambda o: o.signup_date)
            users_filtered = filter(filter_, users)
            values.append(len(list(users_filtered)))
            chart.x_labels.append('%s/%s/%s' % (m.year, m.month, m.day))
        chart.add('Users', values)
        return Response(chart.render(), content_type='image/svg+xml')
    elif graph_name == 'income':
        method = get('method', 0, int)
        if method not in request.payment_methods:
            raise HTTPNotFound()
        method_name = request.payment_methods[method].name

        chart = pygal.StackedBar(x_label_rotation=75, show_legend=True,
                                 **pygalopts)
        chart.title = 'Income (%s, %s)' % (method_name, period)
        orders = DBSession.query(Order) \
            .filter(Order.start_date > datetime.now() - period_time) \
            .filter(Order.method == method) \
            .filter(Order.paid == True) \
            .all()

        # Prepare value dict
        values = {}
        for order in orders:
            t = order.time
            if t not in values:
                values[t] = []

        chart.x_labels = []
        gen = last_days(30) if period == 'm' else last_months(12)
        for m in gen:
            filter_ = time_filter(period, m, lambda o: o.start_date)
            orders_date = list(filter(filter_, orders))
            for duration in values.keys():
                filter_ = lambda o: o.time == duration
                orders_dd = list(filter(filter_, orders_date))
                sum_ = sum(o.paid_amount for o in orders_dd)
                values[duration].append(round(sum_, 4) or None)
            chart.x_labels.append('%s' % m)

        for time, v in values.items():
            label = '%sd' % time.days
            chart.add(label, v)
        return Response(chart.render(), content_type='image/svg+xml')
    else:
        raise HTTPNotFound()

def charts_data(request):
    """ Handles charting from UI generated charts """
    # path for user testing out the chart
    ids_to_override = None
    chart = request.context.chart
    chart.migrate_json_config()
    req_type = request.matchdict["key"]
    if (request.method == "POST" and not request.context.used_uuid
            and req_type == "data_test_config"):
        chart_config = copy.deepcopy(request.unsafe_json_body)
        # for now just throw error in case something weird is found
        applications = UserService.resources_with_perms(
            request.user, ["view"], resource_types=["application"])
        # CRITICAL - this ensures our resultset is limited to only the ones
        # user has view permissions
        all_possible_app_ids = set([app.resource_id for app in applications])
        schema = ChartConfigSchema().bind(resources=all_possible_app_ids)
        schema.deserialize(chart_config)
        filter_settings = build_filter_settings_from_chart_config(
            request, chart_config)
    else:
        # path for everyone else viewing the chart using UUID/public or not
        # ids_to_override will only work here because initially it was
        # validated in dashboard_chart_save() request - so at this point
        # its considered valid
        chart_config = chart.config
        if not chart_config:
            return {}
        ids_to_override = [chart_config["resource"]]
        filter_settings = build_filter_settings_from_chart_config(
            request, chart_config, override_app_ids=ids_to_override)

    if not chart_config:
        return HTTPNotFound()

    # send chart type so the client knows how to render the result
    chart_type = chart_config.get("chartType")
    # we always want to use the POST version of chart type
    # for preview purposes as priority
    if not chart_type:
        chart_type = chart.config.get("chartType")

    es_config = transform_json_to_es_config(
        request, chart_config, filter_settings,
        ids_to_override=ids_to_override)
    query = es_config["query"]

    if not es_config["index_names"]:
        return {
            "name": "",
            "chart_type": chart_type,
            "parent_agg": es_config["parent_agg"],
            "series": [],
            "system_labels": {},
            "groups": [],
            "rect_regions": [],
            "categories": [],
        }

    result = Datastores.es.search(body=query,
                                  index=es_config["index_names"],
                                  doc_type="log", size=0)
    series, info_dict = parse_es_result(result, es_config,
                                        json_config=chart_config)

    regions = []
    if req_type == "data_rule_config":
        json_body = copy.deepcopy(request.unsafe_json_body)
        rule_config = json_body.get("rule")
        field_mappings = json_body.get("mappings")
        rule_obj = RuleService.rule_from_config(
            rule_config, field_mappings, info_dict["system_labels"])
        parent_agg = chart_config.get("parentAgg")
        if parent_agg and parent_agg["type"] == "time_histogram":
            for step in series:
                if rule_obj.match(step):
                    iv = time_deltas[parent_agg["config"]["interval"]]
                    step_start = step["key"].replace(second=0, microsecond=0)
                    regions.append({
                        "start": step_start,
                        "end": step_start + iv["delta"],
                        "class": "rule1",
                    })
    else:
        events = EventService.for_resource([chart.resource_id],
                                           target_uuid=chart.uuid)
        for event in events:
            if event.end_date or event.values.get("end_interval"):
                end_date = event.end_date.replace(second=0, microsecond=0)
                step_end = event.values.get("end_interval") or end_date
            else:
                step_end = datetime.utcnow().replace(second=0, microsecond=0)
            start_date = event.values["start_interval"]
            regions.append({
                "start": start_date,
                "end": step_end,
                "class": "rule1"
            })

    return {
        "name": chart.name,
        "chart_type": chart_type,
        "parent_agg": es_config["parent_agg"],
        "series": series,
        "system_labels": info_dict["system_labels"],
        "rect_regions": regions,
        "groups": [list(v) for v in info_dict["groups"].values()],
        "categories": info_dict["categories"],
    }

def current(self):
    todo_id = self.request.matchdict.get('id')
    todo = sample_todos.get(todo_id)
    if not todo:
        raise HTTPNotFound()
    return todo

def read_many_byuser(self, request):
    """ """
    username = request.matchdict['username']
    page = int(request.params.get("page", 1))
    pagesize = int(request.params.get("pagesize", 10))

    if self.Session.query(User).filter(
            User.username == username).first() is None:
        raise HTTPNotFound("Requested user does not exist.")

    items = []

    activities_sub_query = self.Session.query(
        Activity.activity_identifier.label("identifier"),
        Activity.version,
        Changeset.timestamp,
        Changeset.fk_user). \
        join(Changeset). \
        filter(or_(
            Activity.fk_status == 2,
            Activity.fk_status == 3)).subquery(name="sub_act")

    activities_query = self.Session.query(
        activities_sub_query, User.username). \
        join(User).filter(User.username == username).subquery(name="act")

    # All active and inactive stakeholders
    stakeholder_active = self.Session.query(Stakeholder). \
        filter(or_(
            Stakeholder.fk_status == 2,
            Stakeholder.fk_status == 3)). \
        subquery("st_active")

    # Get the five latest stakeholder by changeset
    stakeholder_sub_query = self.Session.query(
        stakeholder_active.c.stakeholder_identifier.label("identifier"),
        stakeholder_active.c.version,
        Changeset.timestamp,
        Changeset.fk_user). \
        join(
            Changeset,
            Changeset.id == stakeholder_active.c.fk_changeset). \
        subquery(name="sub_st")

    # Join the resulting set to the user table
    stakeholder_query = self.Session.query(
        stakeholder_sub_query, User.username). \
        join(User).filter(User.username == username).subquery(name="st")

    query = self.Session.query(activities_query, literal_column(
        "\'activity\'").label("type")). \
        union(self.Session.query(
            stakeholder_query,
            literal_column("\'stakeholder\'").label("type"))). \
        order_by(desc(activities_query.c.timestamp)).order_by(
            desc(activities_query.c.version))

    for i in query.offset((page - 1) * pagesize).limit(pagesize).all():
        items.append({
            "type": i.type,
            "author": i.username,
            "timestamp": i.timestamp,
            "version": i.version,
            "identifier": str(i.identifier)
        })

    return {
        "items": items,
        "username": username,
        "totalitems": query.count(),
        "pagesize": pagesize,
        "currentpage": page
    }

def test_http_not_found(self):
    from pyramid.httpexceptions import HTTPNotFound
    response = self.call_fut(HTTPNotFound())
    self.assertEqual(response['error_message'], 'Not Found')
    response = self.call_fut(HTTPNotFound)
    self.assertEqual(response['error_message'], 'Not Found')

def get_posts(request):
    """
    Query interface on posts

    Filters have two forms:
    only_*, is for filters that cannot be reversed (ex: only_synthesis,
    only_orphan)
    is_*, is for filters that can be reversed (ex: is_unread=true returns
    only unread messages, is_unread=false returns only read messages)
    order: can be chronological, reverse_chronological, popularity
    root_post_id: all posts below the one specified.
    family_post_id: all posts below the one specified, and all its ancestors.
    post_reply_to: replies to a given post
    root_idea_id: all posts associated with the given idea
    ids: explicit message ids.
    posted_after_date, posted_before_date: date selection (ISO format)
    post_author: filter by author
    """
    localizer = request.localizer
    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound(
            localizer.translate(_("No discussion found with id=%s")) %
            discussion_id)

    discussion.import_from_sources()

    user_id = authenticated_userid(request) or Everyone
    permissions = get_permissions(user_id, discussion_id)

    DEFAULT_PAGE_SIZE = 25
    page_size = DEFAULT_PAGE_SIZE

    filter_names = [
        filter_name for filter_name
        in request.GET.getone('filters').split(',')
        if filter_name
    ] if request.GET.get('filters') else []

    try:
        page = int(request.GET.getone('page'))
    except (ValueError, KeyError):
        page = 1

    text_search = request.GET.get('text_search', None)

    order = request.GET.get('order')
    if order is None:
        order = 'chronological'
    assert order in ('chronological', 'reverse_chronological',
                     'score', 'popularity')
    if order == 'score':
        assert text_search is not None

    if page < 1:
        page = 1

    root_post_id = request.GET.getall('root_post_id')
    if root_post_id:
        root_post_id = get_database_id("Post", root_post_id[0])
    family_post_id = request.GET.getall('family_post_id')
    if family_post_id:
        family_post_id = get_database_id("Post", family_post_id[0])

    root_idea_id = request.GET.getall('root_idea_id')
    if root_idea_id:
        root_idea_id = get_database_id("Idea", root_idea_id[0])

    ids = request.GET.getall('ids[]')
    if ids:
        ids = [get_database_id("Post", id) for id in ids]

    view_def = request.GET.get('view') or 'default'

    only_synthesis = request.GET.get('only_synthesis')

    post_author_id = request.GET.get('post_author')
    if post_author_id:
        post_author_id = get_database_id("AgentProfile", post_author_id)
        assert AgentProfile.get(post_author_id), (
            "Unable to find agent profile with id " + post_author_id)

    post_replies_to = request.GET.get('post_replies_to')
    if post_replies_to:
        post_replies_to = get_database_id("AgentProfile", post_replies_to)
        assert AgentProfile.get(post_replies_to), (
            "Unable to find agent profile with id " + post_replies_to)

    posted_after_date = request.GET.get('posted_after_date')
    posted_before_date = request.GET.get('posted_before_date')

    PostClass = SynthesisPost if only_synthesis == "true" else Post
    ideaContentLinkQuery = discussion.db.query(
        PostClass.id, PostClass.idea_content_links_above_post)
    if order == 'score':
        posts = discussion.db.query(PostClass,
                                    Content.body_text_index.score_name)
    else:
        posts = discussion.db.query(PostClass)

    posts = posts.filter(PostClass.discussion_id == discussion_id)
    ideaContentLinkQuery = ideaContentLinkQuery.filter(
        PostClass.discussion_id == discussion_id)
    ##no_of_posts_to_discussion = posts.count()

    post_data = []

    # True means deleted only, False (default) means non-deleted only.
    # None means both.

    # v0
    # deleted = request.GET.get('deleted', None)
    # end v0

    # v1: we would like something like that
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if view_def == 'id_only':
    #         deleted = None
    #     else:
    #         deleted = False
    # end v1

    # v2
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    #
    # if deleted == 'false':
    #     deleted = False
    #     posts = posts.filter(PostClass.tombstone_condition())
    #     ideaContentLinkQuery = ideaContentLinkQuery.filter(
    #         PostClass.tombstone_condition())
    # elif deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    #     ideaContentLinkQuery = ideaContentLinkQuery.filter(
    #         PostClass.not_tombstone_condition())
    # elif deleted == 'any':
    #     deleted = None
    #     # result will contain deleted and non-deleted posts
    #     pass
    # end v2

    # v3
    # deleted = request.GET.get('deleted', None)
    # if deleted is None:
    #     if not ids:
    #         deleted = False
    #     else:
    #         deleted = None
    # if deleted == 'true':
    #     deleted = True
    #     posts = posts.filter(PostClass.not_tombstone_condition())
    #     ideaContentLinkQuery = ideaContentLinkQuery.filter(
    #         PostClass.not_tombstone_condition())
    # end v3

    # v4
    deleted = request.GET.get('deleted', None)
    if deleted is None:
        if not ids:
            deleted = False
        else:
            deleted = None
    elif deleted.lower() == "any":
        deleted = None
    else:
        deleted = asbool(deleted)
    # if deleted is not in (False, True, None):
    #     deleted = False
    # end v4

    only_orphan = asbool(request.GET.get('only_orphan', False))
    if only_orphan:
        if root_idea_id:
            raise HTTPBadRequest(localizer.translate(
                _("Getting orphan posts of a specific idea isn't supported.")))
        orphans = Idea._get_orphan_posts_statement(
            discussion_id, True, include_deleted=deleted).subquery("orphans")
        posts = posts.join(orphans, PostClass.id == orphans.c.post_id)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            orphans, PostClass.id == orphans.c.post_id)

    if root_idea_id:
        related = Idea.get_related_posts_query_c(
            discussion_id, root_idea_id, True, include_deleted=deleted)
        posts = posts.join(related, PostClass.id == related.c.post_id)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            related, PostClass.id == related.c.post_id)
    elif not only_orphan:
        if deleted is not None:
            if deleted:
                posts = posts.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
                ideaContentLinkQuery = ideaContentLinkQuery.filter(
                    PostClass.publication_state.in_(
                        deleted_publication_states))
            else:
                posts = posts.filter(PostClass.tombstone_date == None)
                ideaContentLinkQuery = ideaContentLinkQuery.filter(
                    PostClass.tombstone_date == None)

    if root_post_id:
        root_post = Post.get(root_post_id)

        posts = posts.filter(
            (Post.ancestry.like(root_post.ancestry +
                                cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id))
    elif family_post_id:
        root_post = Post.get(family_post_id)
        ancestor_ids = root_post.ancestor_ids()
        posts = posts.filter(
            (Post.ancestry.like(root_post.ancestry +
                                cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            (Post.ancestry.like(root_post.ancestry +
                                cast(root_post.id, String) + ',%')) |
            (PostClass.id == root_post.id) |
            (PostClass.id.in_(ancestor_ids)))
    else:
        root_post = None

    if ids:
        posts = posts.filter(Post.id.in_(ids))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(Post.id.in_(ids))

    if posted_after_date:
        posted_after_date = parse_datetime(posted_after_date)
        if posted_after_date:
            posts = posts.filter(
                PostClass.creation_date >= posted_after_date)
            ideaContentLinkQuery = ideaContentLinkQuery.filter(
                PostClass.creation_date >= posted_after_date)
        # Maybe we should do something if the date is invalid. benoitg

    if posted_before_date:
        posted_before_date = parse_datetime(posted_before_date)
        if posted_before_date:
            posts = posts.filter(
                PostClass.creation_date <= posted_before_date)
            ideaContentLinkQuery = ideaContentLinkQuery.filter(
                PostClass.creation_date <= posted_before_date)
        # Maybe we should do something if the date is invalid. benoitg

    if post_author_id:
        posts = posts.filter(PostClass.creator_id == post_author_id)
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            PostClass.creator_id == post_author_id)

    if post_replies_to:
        parent_alias = aliased(PostClass)
        posts = posts.join(parent_alias, PostClass.parent)
        posts = posts.filter(parent_alias.creator_id == post_replies_to)
        ideaContentLinkQuery = ideaContentLinkQuery.join(
            parent_alias, PostClass.parent)
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            parent_alias.creator_id == post_replies_to)

    # Post read/unread management
    is_unread = request.GET.get('is_unread')
    translations = None
    if user_id != Everyone:
        # This is horrible, but the join creates complex subqueries that
        # virtuoso cannot decode properly.
        read_posts = {
            v.post_id for v in discussion.db.query(ViewPost).filter(
                ViewPost.tombstone_condition(),
                ViewPost.actor_id == user_id,
                *ViewPost.get_discussion_conditions(discussion_id))
        }
        liked_posts = {
            l.post_id: l.id
            for l in discussion.db.query(LikedPost).filter(
                LikedPost.tombstone_condition(),
                LikedPost.actor_id == user_id,
                *LikedPost.get_discussion_conditions(discussion_id))
        }
        if is_unread is not None:
            posts = posts.outerjoin(
                ViewPost, and_(
                    ViewPost.actor_id == user_id,
                    ViewPost.post_id == PostClass.id,
                    ViewPost.tombstone_date == None))
            if is_unread == "true":
                posts = posts.filter(ViewPost.id == None)
            elif is_unread == "false":
                posts = posts.filter(ViewPost.id != None)
        user = AgentProfile.get(user_id)
        service = discussion.translation_service()
        if service:
            translations = PrefCollectionTranslationTable(
                service, LanguagePreferenceCollection.getCurrent(request))
    else:
        # If there is no user_id, all posts are always unread
        if is_unread == "false":
            raise HTTPBadRequest(localizer.translate(
                _("You must be logged in to view which posts are read")))

    if text_search is not None:
        # another Virtuoso bug: offband kills score. but it helps speed.
        offband = () if (order == 'score') else None
        posts = posts.filter(Post.body_text_index.contains(
            text_search.encode('utf-8'), offband=offband))
        ideaContentLinkQuery = ideaContentLinkQuery.filter(
            Post.body_text_index.contains(
                text_search.encode('utf-8'), offband=offband))

    # posts = posts.options(contains_eager(Post.source))
    # Horrible hack... But useful for structure load
    if view_def == 'id_only':
        pass  # posts = posts.options(defer(Post.body))
    else:
        posts = posts.options(
            # undefer(Post.idea_content_links_above_post),
            joinedload_all(Post.creator),
            joinedload_all(Post.extracts),
            joinedload_all(Post.widget_idea_links),
            joinedload_all(SynthesisPost.publishes_synthesis),
            subqueryload_all(Post.attachments))
        if len(discussion.discussion_locales) > 1:
            posts = posts.options(*Content.subqueryload_options())
        else:
            posts = posts.options(*Content.joinedload_options())

    ideaContentLinkCache = dict(ideaContentLinkQuery.all())

    if order == 'chronological':
        posts = posts.order_by(Content.creation_date)
    elif order == 'reverse_chronological':
        posts = posts.order_by(Content.creation_date.desc())
    elif order == 'score':
        posts = posts.order_by(Content.body_text_index.score_name.desc())
    elif order == 'popularity':
        # assume reverse chronological otherwise
        posts = posts.order_by(Content.like_count.desc(),
                               Content.creation_date.desc())
    else:
        posts = posts.order_by(Content.id)
    # print str(posts)

    no_of_posts = 0
    no_of_posts_viewed_by_user = 0

    if deleted is True:
        # We just got deleted posts, now we want their ancestors for context
        post_ids = set()
        ancestor_ids = set()

        def add_ancestors(post):
            post_ids.add(post.id)
            ancestor_ids.update(
                [int(x) for x in post.ancestry.strip(",").split(",") if x])

        posts = list(posts)
        for post in posts:
            add_ancestors(post)
        ancestor_ids -= post_ids
        if ancestor_ids:
            ancestors = discussion.db.query(PostClass).filter(
                PostClass.id.in_(ancestor_ids))
            if view_def == 'id_only':
                pass  # ancestors = ancestors.options(defer(Post.body))
            else:
                ancestors = ancestors.options(
                    # undefer(Post.idea_content_links_above_post),
                    joinedload_all(Post.creator),
                    joinedload_all(Post.extracts),
                    joinedload_all(Post.widget_idea_links),
                    joinedload_all(SynthesisPost.publishes_synthesis),
                    subqueryload_all(Post.attachments))
                if len(discussion.discussion_locales) > 1:
                    ancestors = ancestors.options(
                        *Content.subqueryload_options())
                else:
                    ancestors = ancestors.options(
                        *Content.joinedload_options())
            posts.extend(ancestors.all())

    for query_result in posts:
        score, viewpost, likedpost = None, None, None
        if not isinstance(query_result, (list, tuple)):
            query_result = [query_result]
        post = query_result[0]
        if deleted is True:
            add_ancestors(post)

        if user_id != Everyone:
            viewpost = post.id in read_posts
            likedpost = liked_posts.get(post.id, None)
            if view_def != "id_only":
                translate_content(
                    post, translation_table=translations, service=service)
        no_of_posts += 1
        serializable_post = post.generic_json(
            view_def, user_id, permissions) or {}
        if order == 'score':
            score = query_result[1]
            serializable_post['score'] = score

        if viewpost:
            serializable_post['read'] = True
            no_of_posts_viewed_by_user += 1
        elif (user_id != Everyone and root_post is not None and
                root_post.id == post.id):
            # Mark post read, we requested it explicitly
            viewed_post = ViewPost(actor_id=user_id, post=root_post)
            discussion.db.add(viewed_post)
            serializable_post['read'] = True
        else:
            serializable_post['read'] = False
        # serializable_post['liked'] = likedpost.uri() if likedpost else False
        serializable_post['liked'] = (
            LikedPost.uri_generic(likedpost) if likedpost else False)
        if view_def != "id_only":
            serializable_post['indirect_idea_content_links'] = (
                post.indirect_idea_content_links_with_cache(
                    ideaContentLinkCache.get(post.id, None)))

        post_data.append(serializable_post)

    # Benoitg: For now, this completely garbles threading without intelligent
    # handling of pagination. Disabling.
    # posts = posts.limit(page_size).offset(data['startIndex'] - 1)
    # This code isn't up to date. If limiting the query by page, we need to
    # calculate the counts with a separate query to have the right number of
    # results
    # no_of_messages_viewed_by_user = discussion.db.query(ViewPost).join(
    #     Post
    # ).filter(
    #     Post.discussion_id == discussion_id,
    #     ViewPost.actor_id == user_id,
    # ).count() if user_id else 0

    data = {}
    data["page"] = page
    data["unread"] = no_of_posts - no_of_posts_viewed_by_user
    data["total"] = no_of_posts
    data["maxPage"] = max(1, ceil(float(data["total"]) / page_size))
    # TODO: Check if we want 1 based index in the api
    data["startIndex"] = (page_size * page) - (page_size - 1)

    if data["page"] == data["maxPage"]:
        data["endIndex"] = data["total"]
    else:
        data["endIndex"] = data["startIndex"] + (page_size - 1)
    data["posts"] = post_data

    return data

def create_post(request):
    """
    Create a new post in this discussion.

    We use post, not put, because we don't know the id of the post
    """
    localizer = request.localizer
    request_body = json.loads(request.body)
    user_id = authenticated_userid(request)
    if not user_id:
        raise HTTPUnauthorized()

    user = Post.default_db.query(User).filter_by(id=user_id).one()

    body = request_body.get('body', None)
    html = request_body.get('html', None)  # BG: Is this used now? I cannot see it.
    reply_id = request_body.get('reply_id', None)
    idea_id = request_body.get('idea_id', None)
    subject = request_body.get('subject', None)
    publishes_synthesis_id = request_body.get('publishes_synthesis_id', None)

    if not body and not publishes_synthesis_id:
        # Should we allow empty messages otherwise?
        raise HTTPBadRequest(localizer.translate(_("Your message is empty")))

    if reply_id:
        in_reply_to_post = Post.get_instance(reply_id)
    else:
        in_reply_to_post = None

    if idea_id:
        in_reply_to_idea = Idea.get_instance(idea_id)
    else:
        in_reply_to_idea = None

    discussion_id = int(request.matchdict['discussion_id'])
    discussion = Discussion.get_instance(discussion_id)

    if not discussion:
        raise HTTPNotFound(
            localizer.translate(_("No discussion found with id=%s")) %
            (discussion_id,))

    ctx = DummyContext({Discussion: discussion})
    if html:
        log.warning("Still using html")
        # how to guess locale in this case?
        body = LangString.create(html)
    elif body:
        body = LangString.create_from_json(
            body, context=ctx, user_id=user_id)
    else:
        body = LangString.EMPTY(discussion.db)

    if subject:
        subject = LangString.create_from_json(
            subject, context=ctx, user_id=user_id)
    else:
        # print(in_reply_to_post.subject, discussion.topic)
        if in_reply_to_post:
            subject = (
                in_reply_to_post.get_title().first_original().value or ''
                if in_reply_to_post.get_title() else '')
        elif in_reply_to_idea:
            # TODO: This should use a cascade like the frontend
            subject = (in_reply_to_idea.short_title
                       if in_reply_to_idea.short_title else '')
        else:
            subject = discussion.topic if discussion.topic else ''
        # print subject
        if subject is not None and len(subject):
            new_subject = "Re: " + restrip_pat.sub('', subject).strip()
            if (in_reply_to_post and new_subject == subject and
                    in_reply_to_post.get_title()):
                # reuse subject and translations
                subject = in_reply_to_post.get_title().clone(discussion.db)
            else:
                # how to guess locale in this case?
                subject = LangString.create(new_subject)
        else:
            capture_message(
                "A message is about to be written to the database with an "
                "empty subject. This is not supposed to happen.")
            subject = LangString.EMPTY(discussion.db)

    post_constructor_args = {
        'discussion': discussion,
        'creator_id': user_id,
        'subject': subject,
        'body': body
    }

    if publishes_synthesis_id:
        published_synthesis = Synthesis.get_instance(publishes_synthesis_id)
        post_constructor_args['publishes_synthesis'] = published_synthesis
        new_post = SynthesisPost(**post_constructor_args)
        new_post.finalize_publish()
    else:
        new_post = AssemblPost(**post_constructor_args)

    discussion.db.add(new_post)
    discussion.db.flush()

    if in_reply_to_post:
        new_post.set_parent(in_reply_to_post)

    if in_reply_to_idea:
        idea_post_link = IdeaRelatedPostLink(
            creator_id=user_id,
            content=new_post,
            idea=in_reply_to_idea)
        discussion.db.add(idea_post_link)

        idea = in_reply_to_idea
        while idea:
            idea.send_to_changes()
            parents = idea.get_parents()
            idea = next(iter(parents)) if parents else None
    else:
        discussion.root_idea.send_to_changes()

    for source in discussion.sources:
        if 'send_post' in dir(source):
            source.send_post(new_post)
    permissions = get_permissions(user_id, discussion_id)

    return new_post.generic_json('default', user_id, permissions)

def search(request):
    q = request.params.get("q", '')

    if q:
        should = []
        for field in SEARCH_FIELDS:
            kw = {"query": q}
            if field in SEARCH_BOOSTS:
                kw["boost"] = SEARCH_BOOSTS[field]
            should.append(Q("match", **{field: kw}))

        # Add a prefix query if ``q`` is longer than one character.
        if len(q) > 1:
            should.append(Q('prefix', normalized_name=q))

        query = request.es.query("dis_max", queries=should)
        query = query.suggest("name_suggestion", q, term={"field": "name"})
    else:
        query = request.es.query()

    if request.params.get("o"):
        sort_key = request.params["o"]
        if sort_key.startswith("-"):
            sort = {
                sort_key[1:]: {
                    "order": "desc",
                    "unmapped_type": "long",
                },
            }
        else:
            sort = {
                sort_key: {
                    "unmapped_type": "long",
                }
            }

        query = query.sort(sort)

    # Require match to all specified classifiers
    for classifier in request.params.getall("c"):
        query = query.filter("terms", classifiers=[classifier])

    try:
        page_num = int(request.params.get("page", 1))
    except ValueError:
        raise HTTPBadRequest("'page' must be an integer.")

    page = ElasticsearchPage(
        query,
        page=page_num,
        url_maker=paginate_url_factory(request),
    )

    if page.page_count and page_num > page.page_count:
        return HTTPNotFound()

    available_filters = collections.defaultdict(list)

    classifiers_q = (
        request.db.query(Classifier)
        .with_entities(Classifier.classifier)
        .filter(
            exists([release_classifiers.c.trove_id])
            .where(release_classifiers.c.trove_id == Classifier.id))
        .order_by(Classifier.classifier))

    for cls in classifiers_q:
        first, *_ = cls.classifier.split(' :: ')
        available_filters[first].append(cls.classifier)

    def filter_key(item):
        try:
            return 0, SEARCH_FILTER_ORDER.index(item[0]), item[0]
        except ValueError:
            return 1, 0, item[0]

    return {
        "page": page,
        "term": q,
        "order": request.params.get("o", ''),
        "available_filters": sorted(available_filters.items(),
                                    key=filter_key),
        "applied_filters": request.params.getall("c"),
    }

def versions(self):
    versions = self.stage.list_versions(self.project)
    if not versions:
        raise HTTPNotFound("The project %s does not exist." % self.project)
    return get_sorted_versions(versions)

def generate_histogram(self):
    grading = self.request.context.grading
    formula = self.request.GET.get('formula', grading.formula)
    lecture_students = self.get_lecture_students(grading)
    exam_ids, examvars, varsForExam = self.get_exam_vars(grading)
    grades = self.get_current_grades(grading, lecture_students, exam_ids)
    error_msgs = []
    grades = self.populate_with_exam_results(grades, lecture_students,
                                             grading)
    grades, error_msgs = self.apply_formula(grades, formula,
                                            lecture_students, grading,
                                            varsForExam, error_msgs)
    grades_list = [
        float(grades[student_id]['calc']) for student_id in grades.keys()
        if not grades[student_id]['calc'] == ''
    ]
    if not grades_list:
        raise HTTPNotFound(
            "Es existieren keine Noten für diese Benotung.")
    # count occurrences of grades and save them as (grade, count) tuples
    tuple_list = list(Counter(grades_list).items())
    # sort the list by grades
    tuple_list = sorted(tuple_list, key=lambda x: x[0])
    labels = [x[0] for x in tuple_list]
    values = [x[1] for x in tuple_list]
    indexes = numpy.arange(len(labels))
    width = 1
    pyplot.rcParams.update({'font.size': 20})
    fig = pyplot.figure(figsize=(12, 9))
    ax = fig.add_subplot(111)
    pyplot.sca(ax)
    pyplot.bar(indexes, values, width, edgecolor='black', color='red')
    pyplot.xticks(indexes + width - 1, labels)
    pyplot.xlabel('Note')
    pyplot.ylabel('Anzahl')
    yint = range(min(values), math.ceil(max(values)) + 1,
                 math.ceil(max(values) / 10))
    pyplot.yticks(yint)
    percentage_message = []
    grades_count = len(grades_list)
    percentage_list = []
    for x in range(1, 5):
        percentage_list.append(
            100 * sum(grade <= x for grade in grades_list) / grades_count)
    percentage_message.append('• {:.1f}% haben die Note 1.0\n'.format(
        percentage_list[0]))
    percentage_message.append(
        '• {:.1f}% haben die Note 2.0 oder besser\n'.format(
            percentage_list[1]))
    percentage_message.append(
        '• {:.1f}% haben die Note 3.0 oder besser\n'.format(
            percentage_list[2]))
    percentage_message.append(
        '• {:.1f}% haben die Note 4.0 oder besser\n'.format(
            percentage_list[3]))
    percentage_message.append(
        '• {:.1f}% haben die Note 5.0\n'.format(100 - percentage_list[3]))
    pyplot.text(-0.5, -3, "".join(percentage_message),
                verticalalignment='top')
    return fig

def user(context, request):
    gallery = retrieve_gallery(context)
    if gallery is not None:
        return HTTPFound(location=request.resource_url(gallery))
    else:
        return HTTPNotFound()

def save_idea(request):
    discussion_id = int(request.matchdict['discussion_id'])
    idea_id = request.matchdict['id']
    idea_data = json.loads(request.body)
    # Idea.default_db.execute('set transaction isolation level read committed')
    # Special items in TOC, like unsorted posts.
    if idea_id in ['orphan_posts']:
        return {'ok': False, 'id': Idea.uri_generic(idea_id)}

    idea = Idea.get_instance(idea_id)
    if not idea:
        raise HTTPNotFound("No such idea: %s" % (idea_id))
    if isinstance(idea, RootIdea):
        raise HTTPBadRequest("Cannot edit root idea.")
    discussion = Discussion.get(int(discussion_id))
    if not discussion:
        raise HTTPNotFound("Discussion with id '%s' not found." % discussion_id)
    if idea.discussion_id != discussion.id:
        raise HTTPBadRequest(
            "Idea from discussion %s cannot be saved from a different "
            "discussion (%s)." % (idea.discussion_id, discussion.id))
    if 'shortTitle' in idea_data:
        idea.short_title = idea_data['shortTitle']
    if 'longTitle' in idea_data:
        idea.long_title = idea_data['longTitle']
    if 'definition' in idea_data:
        idea.definition = idea_data['definition']

    if 'parentId' in idea_data and idea_data['parentId'] is not None:
        # TODO: Make sure this is sent as a list!
        parent = Idea.get_instance(idea_data['parentId'])
        # Check before dereferencing: the original called
        # parent.get_all_ancestors() before this None-check.
        if not parent:
            raise HTTPNotFound("Missing parentId %s" % (idea_data['parentId']))

        # calculate it early to maximize contention.
        prev_ancestors = parent.get_all_ancestors()
        new_ancestors = set()

        order = idea_data.get('order', 0.0)

        for parent_link in idea.source_links:
            # still assuming there's only one.
            pl_parent = parent_link.source
            pl_ancestors = pl_parent.get_all_ancestors()
            new_ancestors.update(pl_ancestors)
            if parent_link.source != parent:
                parent_link.copy(True)
                parent_link.source = parent
                parent.db.expire(parent, ['target_links'])
                parent.db.expire(pl_parent, ['target_links'])
                for ancestor in pl_ancestors:
                    if ancestor in prev_ancestors:
                        break
                    ancestor.send_to_changes()
                for ancestor in prev_ancestors:
                    if ancestor in new_ancestors:
                        break
                    ancestor.send_to_changes()
            parent_link.order = order
            parent_link.db.expire(parent_link.source, ['target_links'])
            parent_link.source.send_to_changes()
            parent_link.db.flush()

    idea.is_in_next_synthesis = idea_data.get('inNextSynthesis', False)
    idea.send_to_changes()

    return {'ok': True, 'id': idea.uri()}

def post_upload(context, request):
    properties = context.upgrade_properties()
    if properties['status'] not in ('uploading', 'upload failed'):
        raise HTTPForbidden(
            'status must be "uploading" to issue new credentials')
    accession_or_external = properties.get(
        'accession') or properties['external_accession']
    file_upload_bucket = request.registry.settings['file_upload_bucket']
    external = context.propsheets.get('external', None)
    registry = request.registry
    if external is None:
        # Handle objects initially posted as another state.
        bucket = file_upload_bucket
        uuid = context.uuid
        mapping = context.schema['file_format_file_extension']
        file_extension = mapping[properties['file_format']]
        date = properties['date_created'].split('T')[0].replace('-', '/')
        key = '{date}/{uuid}/{accession_or_external}{file_extension}'.format(
            accession_or_external=accession_or_external,
            date=date,
            file_extension=file_extension,
            uuid=uuid,
            **properties)
    elif external.get('service') == 's3':
        bucket = external['bucket']
        # Must reset file to point to file_upload_bucket
        # (keep AWS public dataset in sync).
        if bucket != file_upload_bucket:
            registry.notify(BeforeModified(context, request))
            context._set_external_sheet({'bucket': file_upload_bucket})
            registry.notify(AfterModified(context, request))
            bucket = file_upload_bucket
        key = external['key']
    else:
        raise HTTPNotFound(detail='External service {} not expected'.format(
            external.get('service')))

    name = 'up{time:.6f}-{accession_or_external}'.format(
        accession_or_external=accession_or_external,
        time=time.time(),
        **properties)[:32]  # max 32 chars

    profile_name = request.registry.settings.get('file_upload_profile_name')
    upload_creds = UploadCredentials(bucket, key, name,
                                     profile_name=profile_name)
    s3_transfer_allow = request.registry.settings.get(
        'external_aws_s3_transfer_allow', 'false')
    creds = upload_creds.external_creds(
        s3_transfer_allow=asbool(s3_transfer_allow),
        s3_transfer_buckets=request.registry.settings.get(
            'external_aws_s3_transfer_buckets'),
    )
    new_properties = None
    if properties['status'] == 'upload failed':
        new_properties = properties.copy()
        new_properties['status'] = 'uploading'

    registry.notify(BeforeModified(context, request))
    context.update(new_properties, {'external': creds})
    registry.notify(AfterModified(context, request))

    rendered = request.embed('/%s/@@object' % context.uuid, as_user=True)
    result = {
        'status': 'success',
        '@type': ['result'],
        '@graph': [rendered],
    }
    return result

def download_response(self, package):
    # Don't need to implement because the download urls go to S3
    return HTTPNotFound()

def relogin_to_user(request):
    user = UserService.by_id(request.GET.get("user_id"))
    if not user:
        return HTTPNotFound()
    headers = security.remember(request, user.id)
    return HTTPFound(location=request.route_url("/"), headers=headers)

def not_found(self):
    result = HTTPNotFound()
    result.content_type = 'application/json'
    result.body = NOT_FOUND
    return result

def notfound(request):
    raise HTTPNotFound()

def thread(request):
    thread_id = request.matchdict['thread_id']
    thread = DBSession.query(Thread).get(thread_id)
    if thread is None:
        raise HTTPNotFound(comment='No such thread.').exception
    return {'thread': thread}

def _get_question(self):
    try:
        return self.question_query.get_by_id(self._get_question_id())
    except NoResultFound:
        raise HTTPNotFound()

def render(self):
    package = Package.by_name(self.session,
                              self.request.matchdict['package_name'])
    if not package:
        raise HTTPNotFound()

    if 'form.refresh_package' in self.request.params:
        package.update_at = None
        self.session.add(package)

    owners = dict((usr.login, usr) for usr in package.owners)
    can_edit_role = package.local and (
        self.login in owners.keys() or
        any(g.name == 'admin' for g in self.user.groups)
    )

    if 'form.add_role' in self.request.params:
        if not can_edit_role:
            raise HTTPForbidden()
        user = User.by_login(self.session, self.request.params['login'])
        if user and user.has_permission('upload_releasefile'):
            if self.request.params['role'] == 'owner':
                if user.login not in owners:
                    package.owners.append(user)
            else:
                # Check against existing maintainers to avoid duplicates
                # (the original compared against package.owners here).
                maintainers = [usr.login for usr in package.maintainers]
                if user.login not in maintainers:
                    package.maintainers.append(user)
            self.session.add(package)

    if 'form.remove_maintainer' in self.request.params:
        if not can_edit_role:
            raise HTTPForbidden()
        user = User.by_login(self.session, self.request.params['login'])
        if user:
            maintainers = dict((usr.login, usr)
                               for usr in package.maintainers)
            if user.login in maintainers:
                package.maintainers.remove(maintainers[user.login])
            self.session.add(package)

    if 'form.remove_owner' in self.request.params:
        if not can_edit_role:
            raise HTTPForbidden()
        user = User.by_login(self.session, self.request.params['login'])
        if user:
            if user.login in owners:
                package.owners.remove(owners[user.login])
            self.session.add(package)

    if 'release_version' in self.request.matchdict:
        release = Release.by_version(
            self.session, package.name,
            self.request.matchdict['release_version'])
    else:
        release = package.sorted_releases[0]

    return {u'package': package,
            u'release': release,
            u'can_edit_role': can_edit_role,
            }

def get_file_from_attempt(request):
    """
    Get a portion of a package bound to an Attempt.

    Get a specific member, by name (raw):
    `/api/:api_id/files/:attempt_id/:target?file=:member&raw=true`

    Get a specific member, by name:
    `/api/:api_id/files/:attempt_id/:target.zip?file=:member`

    Get more than one specific member, by name:
    `/api/:api_id/files/:attempt_id/:target.zip?file=:member&file=:member2`

    Get the full package:
    `/api/:api_id/files/:attempt_id/:target.zip?full=true`
    """
    has_body = False
    attempt_id = request.matchdict.get('attempt_id', None)
    target = request.matchdict.get('target', None)

    try:
        attempt = request.db.query(models.Attempt).get(attempt_id)
    except DataError:
        return HTTPNotFound()

    if attempt is None:
        return HTTPNotFound()

    is_full = asbool(request.GET.get('full', False))
    is_raw = asbool(request.GET.get('raw', False))

    if is_full and is_raw:
        return HTTPBadRequest()

    response = Response(status_code=200)

    # Get the full package.
    if is_full:
        response.content_type = 'application/zip'
        response.app_iter = open(attempt.filepath, 'rb')
        has_body = True
    elif is_raw:
        member_name = request.GET.get('file')
        response.content_type = 'text/xml'
        response.app_iter = attempt.analyzer.get_fp(member_name)
        has_body = True
    else:
        response.content_type = 'application/zip'
        # Get partial portions of the package.
        files = [member for attr, member in request.GET.items()
                 if attr == 'file']
        try:
            if files:
                response.app_iter = attempt.analyzer.subzip(*files)
                has_body = True
        except ValueError:
            return HTTPBadRequest()

    return response if has_body else HTTPBadRequest()

def _get_document_or_404(self, identifiers):
    document = self.document_cls.find_one(self._get_ids_dict(identifiers))
    if document is None:
        raise HTTPNotFound()
    return document

def found_view(request):
    guid = request.matchdict.get("guid")
    typ = request.matchdict.get("typ", "all")
    if request.matched_route.name == 'rest_json_list':
        typ = request.GET.get('kind', 'all')
    bld = typ.endswith('_b')
    if typ not in ('all', 'found', 'street', 'not found',
                   'all_b', 'found_b', 'not found_b'):
        raise HTTPBadRequest()
    if not guid or guid == 'None':
        # root is ok
        guid = None
    else:
        # Check for a malformed guid
        try:
            guid = uuid.UUID(guid)
        except ValueError:
            raise HTTPBadRequest()
    # Check that the area exists
    myself = melt.fias_AONode(guid)
    if guid and not myself.isok:
        raise HTTPNotFound()

    if bld:
        alist = myself.subB(typ)
        alist.sort(key=lambda el: el.onestr)
    else:
        alist = myself.subO(typ, not ('rest' in request.url))
        alist.sort(key=lambda el: el.offname)

    fullstat = guid is not None and myself.stat_db_full
    fullstat = fullstat or all(
        [it.stat_db_full for it in myself.subO('all')])

    offset = int(request.matchdict.get("offset", 0))
    myself.need_more = (len(alist) > (off_border * 1.5) and
                        len(alist) > off_border + offset)
    if 'rest' in request.url:
        request.response.content_type = 'text/xml'
        myself.need_more = False
    if (offset or myself.need_more):
        myself.offlinks = True
        alist = alist[offset:offset + off_border]
    else:
        myself.offlinks = False

    def links(self, typ_l):
        if typ_l in ('all', 'found', 'street', 'not found',
                     'all_b', 'found_b', 'not found_b'):
            return request.route_url('found0', guid=self.guid, typ=typ_l)
        elif typ_l == 'details':
            return request.route_url('details', guid=self.guid, kind='ao')
        elif typ_l == 'top':
            if self.parent.guid:
                return request.route_url('found0', guid=self.parentguid,
                                         typ='all')
            else:
                return request.route_url('foundroot0', typ='all')
        elif typ_l == "prev":
            return request.route_url('found', guid=self.guid, typ=typ,
                                     offset=max(0, offset - off_border))
        elif typ_l == "next":
            return request.route_url('found', guid=self.guid, typ=typ,
                                     offset=min(self.stat(typ) - 1,
                                                offset + off_border))

    return {
        "list": alist,
        "myself": myself,
        "links": links,
        'bld': bld,
        'fullstat': fullstat
    }

def artistView(request):
    artist_id = int(request.matchdict["id"])
    session = request.DBSession

    artist = session.query(Artist).filter_by(id=artist_id).first()
    if not artist:
        raise HTTPNotFound()

    def _filterByType(typ, artist, albums):
        if typ == SINGLE_TYPE:
            return artist.getTrackSingles()
        elif typ == ALL_TYPE:
            return albums
        else:
            return artist.getAlbumsByType(typ)

    albums = list(artist.albums)
    all_tabs = [ALL_TYPE] + ALBUM_TYPE_IDS

    active_albums = []
    active_singles = []
    active_tab = request.GET.get("album_tab", None)
    if not active_tab:
        # No album type was requested, try to pick a smart one.
        for active_tab in all_tabs:
            active_albums = _filterByType(active_tab, artist, albums)
            if active_albums:
                break
    else:
        active_albums = _filterByType(active_tab, artist, albums)

    if active_tab == SINGLE_TYPE:
        active_singles = active_albums
        active_albums = []

    if active_albums:
        active_albums = util.sortByDate(active_albums,
                                        active_tab == LIVE_TYPE)
    else:
        # Unlike tags, the orm.Track does not have dates so not sorting :/
        # XXX: active_singles = util.sortByDate(active_singles)
        pass

    for a in active_albums:
        covers = [img for img in a.images
                  if img.type == Image.FRONT_COVER_TYPE]
        a.cover = random.choice(covers) if covers else None

    tabs = []
    for name in all_tabs:
        t = (name,
             TYPE_DISPLAY_NAMES[name],
             active_tab == name,
             bool(len(_filterByType(name, artist, albums))))
        tabs.append(t)

    return ResponseDict(
        artist=artist,
        active_tab=active_tab,
        active_albums=active_albums,
        active_singles=active_singles,
        tabs=tabs,
    )