def get(self, request):
    """Return the prepared list of fields, optionally searched and sorted."""
    params = self.get_params(request)
    queryset = self.get_queryset(request, params)

    # When a query term is present and Haystack is available, delegate
    # to the full-text search backend.
    if params['query'] and OPTIONAL_DEPS['haystack']:
        usage.log('search', model=self.model, request=request, data={
            'query': params['query'],
        })
        results = self.model.objects.search(
            params['query'], queryset=queryset,
            max_results=params['limit'], partial=True)
        objects = (hit.object for hit in results)
    else:
        if params['sort'] == 'name':
            direction = '-name' if params['order'] == 'desc' else 'name'
            queryset = queryset.order_by(direction)

        if params['limit']:
            queryset = queryset[:params['limit']]

        objects = queryset

    # Optionally drop fields whose backing model field no longer exists.
    if self.checks_for_orphans:
        kept = [obj.pk for obj in objects if not is_field_orphaned(obj)]
        objects = self.model.objects.filter(pk__in=kept)

    return self.prepare(request, objects, **params)
def get(self, request, pk):
    """Return the value distribution of a field as value/label/count rows."""
    instance = self.get_object(request, pk=pk)

    if is_field_orphaned(instance):
        data = {"message": "Orphaned fields do not support distribution calls."}
        return self.render(request, data,
                           status=codes.unprocessable_entity)

    params = self.get_params(request)

    # Context-aware requests restrict the queryset by the user's context.
    context = self.get_context(request) if params["aware"] else None

    QueryProcessor = pipeline.query_processors[params["processor"]]
    processor = QueryProcessor(context=context, tree=instance.model)
    queryset = processor.get_queryset(request=request)

    # Value/label mapping used to augment each result for display.
    value_labels = instance.value_labels(queryset=queryset)

    result = []
    for value, count in instance.dist(queryset=queryset):
        if value in value_labels:
            label = value_labels[value]
        else:
            label = smart_unicode(value)
        result.append({"value": value, "label": label, "count": count})

    usage.log("dist", instance=instance, request=request)
    return result
def test_instance(self):
    """Logging with an instance should attach it as the log's content object."""
    f = DataField(app_name='avocado', model_name='datafield',
                  field_name='name')
    f.save()
    # ``async`` became a reserved word in Python 3.7, so the original
    # ``async=False`` keyword is a syntax error there; pass it via
    # **kwargs to keep the identical call valid on all interpreters.
    usage.log('test', instance=f, **{'async': False})
    # Fetch the newest log entry instead of assuming its primary key is 1
    # (consistent with the sibling test that uses Log.objects.all()[0]).
    event = Log.objects.all()[0]
    self.assertEqual(event.content_object, f)
def get(self, request, pk):
    """Return summary statistics for a field based on its simple type."""
    uri = request.build_absolute_uri
    instance = self.get_object(request, pk=pk)

    if instance.simple_type == 'number':
        stats = instance.max().min().avg()
    elif instance.simple_type in ('date', 'time', 'datetime'):
        stats = instance.max().min()
    else:
        stats = instance.count(distinct=True)

    if stats is None:
        resp = {}
    else:
        # ``stats`` is an iterable of result rows; take the first, if any.
        try:
            resp = next(iter(stats))
        except StopIteration:
            resp = {}

    resp['_links'] = {
        'self': {
            'href': uri(reverse('serrano:field-stats', args=[instance.pk])),
        },
        'parent': {
            'href': uri(reverse('serrano:field', args=[instance.pk])),
        },
    }

    usage.log('stats', instance=instance, request=request)
    return resp
def get(self, request):
    """List fields, using Haystack full-text search when a query is given."""
    params = self.get_params(request)
    queryset = self.get_queryset(request, params)

    use_search = params['query'] and OPTIONAL_DEPS['haystack']

    if use_search:
        usage.log('search', model=self.model, request=request, data={
            'query': params['query'],
        })
        matches = self.model.objects.search(
            params['query'], queryset=queryset,
            max_results=params['limit'], partial=True)
        objects = (match.object for match in matches)
    else:
        if params['sort'] == 'name':
            queryset = queryset.order_by(
                '-name' if params['order'] == 'desc' else 'name')

        if params['limit']:
            queryset = queryset[:params['limit']]

        objects = queryset

    # Filter out fields whose underlying model field has been removed.
    if self.checks_for_orphans:
        valid_pks = [o.pk for o in objects if not is_field_orphaned(o)]
        objects = self.model.objects.filter(pk__in=valid_pks)

    return self.prepare(request, objects, **params)
def get(self, request, pk):
    """Return per-type statistics for a field with hypermedia links."""
    uri = request.build_absolute_uri
    instance = self.get_object(request, pk=pk)

    simple_type = instance.simple_type

    if simple_type == 'number':
        stats = instance.max().min().avg()
    elif simple_type in ('date', 'time', 'datetime'):
        stats = instance.max().min()
    else:
        stats = instance.count(distinct=True)

    resp = {}
    if stats is not None:
        # The aggregator yields result rows; use the first when present.
        try:
            resp = next(iter(stats))
        except StopIteration:
            resp = {}

    resp['_links'] = {
        'self': {
            'href': uri(
                reverse('serrano:field-stats', args=[instance.pk])),
        },
        'parent': {
            'href': uri(reverse('serrano:field', args=[instance.pk])),
        },
    }

    usage.log('stats', instance=instance, request=request)
    return resp
def get(self, request, **kwargs):
    """Return the prepared instance, recording the access time."""
    instance = self.get_object(request, **kwargs)
    usage.log('read', instance=instance, request=request)

    # Single-field update that avoids touching other instance fields.
    self.model.objects.filter(pk=instance.pk).update(accessed=datetime.now())

    return self.prepare(request, instance)
def get(self, request, pk):
    """Return field values: a random sample, search results, or a page."""
    instance = self.get_object(request, pk=pk)

    if is_field_orphaned(instance):
        data = {"message": "Orphaned fields do not support values calls."}
        return self.render(request, data,
                           status=codes.unprocessable_entity)

    params = self.get_params(request)

    context = self.get_context(request) if params["aware"] else None

    QueryProcessor = pipeline.query_processors[params["processor"]]
    processor = QueryProcessor(tree=instance.model, context=context)
    queryset = processor.get_queryset(request=request)

    if params["random"]:
        # A population smaller than the requested sample raises a
        # ValueError; return all possible values instead of erroring.
        try:
            return self.get_random_values(
                request, instance, params["random"], queryset)
        except ValueError:
            return instance.values(queryset=queryset)

    page = params["page"]
    limit = params["limit"]

    # A supplied query term triggers the icontains search.
    if params["query"]:
        usage.log("items", instance=instance, request=request,
                  data={"query": params["query"]})
        values = self.get_search_values(
            request, instance, params["query"], queryset)
    else:
        values = self.get_all_values(request, instance, queryset)

    # Without a page, return the full set of values.
    if page is None:
        return values

    paginator = self.get_paginator(values, limit=limit)
    page = paginator.page(page)

    data = self.get_page_response(request, paginator, page)
    data.update({"items": page.object_list})

    # Attach pagination links and link templates to the response.
    path = reverse("serrano:field-values", kwargs={"pk": pk})
    links = self.get_page_links(request, path, page, extra=params)
    templates = self.get_link_templates(request)
    response = self.render(request, content=data)
    return patch_response(request, response, links, templates)
def get(self, request, **kwargs):
    """Return the prepared instance for the requested tree, logging access."""
    params = self.get_params(request)
    instance = self.get_object(request, **kwargs)

    usage.log('read', instance=instance, request=request)

    # Record the access time, persisting only that single field.
    instance.accessed = datetime.now()
    instance.save(update_fields=('accessed',))

    return self.prepare(request, instance, tree=params['tree'])
def get(self, request, **kwargs):
    """Prepare and return the instance, stamping its access time."""
    params = self.get_params(request)
    instance = self.get_object(request, **kwargs)

    usage.log('read', instance=instance, request=request)

    # Only the ``accessed`` column is written back.
    instance.accessed = datetime.now()
    instance.save(update_fields=('accessed', ))

    return self.prepare(request, instance, tree=params['tree'])
def test_instance(self):
    """Logging with an instance should attach it as the log's content object."""
    f = DataField(app_name='avocado', model_name='datafield',
                  field_name='name')
    f.save()
    # ``async`` became a reserved word in Python 3.7, so the original
    # ``async=False`` keyword is a syntax error there; pass it via
    # **kwargs to keep the identical call valid on all interpreters.
    usage.log('test', instance=f, **{'async': False})
    event = Log.objects.all()[0]
    self.assertEqual(event.content_object, f)
def get(self, request, **kwargs):
    """Return the prepared instance while bumping its access timestamp."""
    instance = self.get_object(request, **kwargs)
    usage.log('read', instance=instance, request=request)

    # Fast single field update..
    # TODO Django 1.5+ supports this on instance save methods.
    self.model.objects.filter(pk=instance.pk) \
        .update(accessed=datetime.now())

    return self.prepare(request, instance)
def delete(self, request, **kwargs):
    """Delete a view unless it is the session view."""
    instance = self.get_object(request, **kwargs)

    # The session view must never be removed.
    if instance.session:
        data = {
            'message': 'Cannot delete session view',
        }
        return self.render(request, data, status=codes.bad_request)

    instance.delete()
    usage.log('delete', instance=instance, request=request)
def get(self, request, pk):
    """Return the prepared concept unless it contains orphaned fields."""
    instance = self.get_object(request, pk=pk)

    if self.checks_for_orphans and has_orphaned_field(instance):
        data = {'message': 'One or more orphaned fields exist'}
        return self.render(request, data,
                           status=codes.internal_server_error)

    usage.log('read', instance=instance, request=request)
    return self.prepare(request, instance)
def get(self, request, pk):
    """Return context-aware summary statistics for a field."""
    uri = request.build_absolute_uri
    instance = self.get_object(request, pk=pk)
    params = self.get_params(request)

    context = self.get_context(request) if params['aware'] else None

    QueryProcessor = pipeline.query_processors[params['processor']]
    processor = QueryProcessor(context=context, tree=instance.model)
    queryset = processor.get_queryset(request=request)

    # max() is called on the instance and returns an Aggregator which
    # carries the queryset internally, so the chained min()/avg() calls
    # must not be passed the queryset again.
    if instance.simple_type == 'number':
        stats = instance.max(queryset=queryset).min().avg()
    elif instance.simple_type in ('date', 'time', 'datetime'):
        stats = instance.max(queryset=queryset).min()
    else:
        stats = instance.count(queryset=queryset, distinct=True)

    if stats is None:
        resp = {}
    else:
        try:
            resp = next(iter(stats))
        except StopIteration:
            resp = {}

    resp['_links'] = {
        'self': {
            'href': uri(
                reverse('serrano:field-stats', args=[instance.pk])),
        },
        'parent': {
            'href': uri(reverse('serrano:field', args=[instance.pk])),
        },
    }

    usage.log('stats', instance=instance, request=request)
    return resp
def get(self, request, pk):
    """Return the prepared concept, erroring on orphaned fields."""
    instance = self.get_object(request, pk=pk)

    if self.checks_for_orphans and has_orphaned_field(instance):
        data = {
            'message': 'One or more orphaned fields exist'
        }
        return self.render(request, data,
                           status=codes.internal_server_error)

    usage.log('read', instance=instance, request=request)
    return self.prepare(request, instance)
def delete(self, request, **kwargs):
    """Delete a context unless it backs the current session."""
    instance = self.get_object(request, **kwargs)

    # Cannot delete the current session
    if instance.session:
        data = {
            'message': 'Cannot delete session context',
        }
        return self.render(request, data, status=codes.bad_request)

    instance.delete()
    usage.log('delete', instance=instance, request=request)
    request.session.modified = True
def test_request(self):
    """Logging with a request should capture the user and session key."""
    user = User.objects.create_user('root', 'root')
    session = SessionStore()
    session.save()

    request = HttpRequest()
    request.user = user
    request.session = session

    # ``async`` became a reserved word in Python 3.7, so the original
    # ``async=False`` keyword is a syntax error there; pass it via
    # **kwargs to keep the identical call valid on all interpreters.
    usage.log('test', request=request, **{'async': False})

    event = Log.objects.all()[0]
    self.assertEqual(event.user, user)
    self.assertEqual(event.session_key, request.session.session_key)
def get(self, request, pk):
    """Return the prepared field, erroring if it is orphaned."""
    instance = self.get_object(request, pk=pk)
    usage.log('read', instance=instance, request=request)

    # If the field is an orphan then log an error before returning an error
    if self.checks_for_orphans and is_field_orphaned(instance):
        data = {
            'message': 'Orphaned field',
        }
        return self.render(request, data,
                           status=codes.internal_server_error)

    return self.prepare(request, instance)
def get(self, request, pk=None):
    """List concepts ordered by category, with optional search."""
    params = self.get_params(request)

    order = [
        '-category__order'
        if params['order'] == 'desc' else 'category__order'
    ]

    queryset = self.get_queryset(request, params)

    if params['query']:
        usage.log('search', model=self.model, request=request, data={
            'query': params['query'],
        })

        if self.checks_for_orphans:
            queryset = self._get_non_orphans(queryset)

        queryset = self.model.objects.search(
            params['query'], queryset=queryset,
            max_results=params['limit'], partial=True)

        # Haystack results wrap the model instances, so unwrap them;
        # otherwise the queryset can be used directly.
        if OPTIONAL_DEPS['haystack']:
            objects = (hit.object for hit in queryset)
        else:
            objects = queryset
    else:
        if params['sort'] == 'name':
            order.append('-name' if params['order'] == 'desc' else 'name')

        # Ordering must happen before slicing since querysets cannot
        # be ordered post-slice.
        queryset = queryset.order_by(*order)

        if self.checks_for_orphans:
            queryset = self._get_non_orphans(queryset)

        if params['limit']:
            queryset = queryset[:params['limit']]

        objects = queryset

    return self.prepare(request, objects, **params)
def get(self, request, pk=None):
    """List concepts for the current user, honoring publish state."""
    params = self.get_params(request)

    order = [
        '-category__order'
        if params['order'] == 'desc' else 'category__order'
    ]

    queryset = self.get_queryset(request)

    # For privileged users, check if any filters are applied, otherwise
    # only allow for published objects.
    if not can_change_concept(request.user) or not params['unpublished']:
        queryset = queryset.published()

    # If Haystack is installed, perform the search
    if params['query'] and OPTIONAL_DEPS['haystack']:
        usage.log('search', model=self.model, request=request, data={
            'query': params['query'],
        })
        results = self.model.objects.search(
            params['query'], queryset=queryset,
            max_results=params['limit'], partial=True)
        objects = (hit.object for hit in results)
    else:
        if params['sort'] == 'name':
            order.append('-name' if params['order'] == 'desc' else 'name')

        # Order before any slice since sliced querysets cannot be
        # reordered.
        queryset = queryset.order_by(*order)

        if params['limit']:
            queryset = queryset[:params['limit']]

        objects = queryset

    # When embedding fields, drop concepts containing orphaned fields.
    if self.checks_for_orphans and params['embed']:
        valid = [obj.pk for obj in objects if not has_orphaned_field(obj)]
        objects = self.model.objects.filter(pk__in=valid)

    return self.prepare(request, objects, **params)
def delete(self, request, **kwargs):
    """Delete a query, notifying shared users, unless it is the session query."""
    instance = self.get_object(request, **kwargs)

    if instance.session:
        data = {
            'message': 'Cannot delete session query',
        }
        return self.render(request, data, status=codes.bad_request)

    # Notify everyone the query was shared with before it disappears.
    utils.send_mail(
        instance.shared_users.values_list('email', flat=True),
        DELETE_QUERY_EMAIL_TITLE.format(instance.name),
        DELETE_QUERY_EMAIL_BODY.format(instance.name))

    instance.delete()
    usage.log('delete', instance=instance, request=request)
def post(self, request):
    """Create a context from the request payload."""
    form = ContextForm(request, request.data)

    if form.is_valid():
        instance = form.save()
        usage.log('create', instance=instance, request=request)
        response = self.render(request, self.prepare(request, instance),
                               status=codes.created)
    else:
        data = {
            'message': 'Error creating context',
            'errors': dict(form.errors),
        }
        response = self.render(request, data,
                               status=codes.unprocessable_entity)

    return response
def get(self, request, pk):
    """Return field values: random sample, search results, or a page."""
    instance = self.get_object(request, pk=pk)
    params = self.get_params(request)

    if params['random']:
        return self.get_random_values(request, instance, params['random'])

    page = params['page']
    limit = params['limit']

    # If a query term is supplied, perform the icontains search
    if params['query']:
        usage.log('values', instance=instance, request=request, data={
            'query': params['query'],
        })
        values = self.get_search_values(request, instance, params['query'])
    else:
        values = self.get_all_values(request, instance)

    # No page specified, return everything
    if page is None:
        return values

    paginator = self.get_paginator(values, limit=limit)
    page = paginator.page(page)

    # Paginator-based response augmented with hypermedia links.
    resp = self.get_page_response(request, paginator, page)

    path = reverse('serrano:field-values', kwargs={'pk': pk})
    links = self.get_page_links(request, path, page, extra=params)
    links['parent'] = {
        'href': request.build_absolute_uri(
            reverse('serrano:field', kwargs={'pk': pk})),
    }

    resp.update({
        '_links': links,
        'values': page.object_list,
    })

    return resp
def post(self, request):
    """Create a view from the request payload, marking the session modified."""
    form = ViewForm(request, request.data)

    if not form.is_valid():
        data = {
            'message': 'Cannot create view',
            'errors': dict(form.errors),
        }
        return self.render(request, data,
                           status=codes.unprocessable_entity)

    instance = form.save()
    usage.log('create', instance=instance, request=request)
    request.session.modified = True
    return self.render(request, self.prepare(request, instance),
                       status=codes.created)
def put(self, request, **kwargs):
    """Update an existing context from the request payload."""
    instance = self.get_object(request, **kwargs)
    form = ContextForm(request, request.data, instance=instance)

    if not form.is_valid():
        data = {
            'message': 'Error updating context',
            'errors': dict(form.errors),
        }
        return self.render(request, data,
                           status=codes.unprocessable_entity)

    instance = form.save()
    usage.log('update', instance=instance, request=request)
    return self.render(request, self.prepare(request, instance))
def get(self, request, pk=None):
    """List concepts ordered by category with optional text search."""
    params = self.get_params(request)

    order = ['-category__order' if params['order'] == 'desc'
             else 'category__order']

    queryset = self.get_queryset(request, params)

    if params['query']:
        usage.log('search', model=self.model, request=request, data={
            'query': params['query'],
        })

        if self.checks_for_orphans:
            queryset = self._get_non_orphans(queryset)

        queryset = self.model.objects.search(
            params['query'], queryset=queryset,
            max_results=params['limit'], partial=True)

        # If we searched using haystack then we need to extract the
        # objects from the returned queryset. Otherwise, we can just
        # use the queryset directly.
        objects = ((x.object for x in queryset)
                   if OPTIONAL_DEPS['haystack'] else queryset)
    else:
        if params['sort'] == 'name':
            order.append('-name' if params['order'] == 'desc' else 'name')

        # We need to order before a possible slice is taken because
        # querysets cannot be ordered post-slice.
        queryset = queryset.order_by(*order)

        if self.checks_for_orphans:
            queryset = self._get_non_orphans(queryset)

        if params['limit']:
            queryset = queryset[:params['limit']]

        objects = queryset

    return self.prepare(request, objects, **params)
def put(self, request, **kwargs):
    """Update an existing query from the request payload."""
    instance = self.get_object(request, **kwargs)
    form = QueryForm(request, request.data, instance=instance)

    if form.is_valid():
        instance = form.save()
        usage.log('update', instance=instance, request=request)
        response = self.render(request, self.prepare(request, instance))
    else:
        data = {
            'message': 'Cannot update query',
            'errors': dict(form.errors),
        }
        response = self.render(request, data,
                               status=codes.unprocessable_entity)

    return response
def put(self, request, **kwargs):
    """Update a query, marking the session modified on success."""
    instance = self.get_object(request, **kwargs)
    form = QueryForm(request, request.data, instance=instance)

    if not form.is_valid():
        data = {
            'message': 'Cannot update query',
            'errors': dict(form.errors),
        }
        return self.render(request, data,
                           status=codes.unprocessable_entity)

    instance = form.save()
    usage.log('update', instance=instance, request=request)
    request.session.modified = True
    return self.render(request, self.prepare(request, instance))
def post(self, request):
    """Create a query from the request payload."""
    form = QueryForm(request, request.data)

    if not form.is_valid():
        data = {
            'message': 'Error creating query',
            'errors': dict(form.errors),
        }
        return self.render(request, data,
                           status=codes.unprocessable_entity)

    instance = form.save()
    usage.log('create', instance=instance, request=request)
    return self.render(request, self.prepare(request, instance),
                       status=codes.created)
def get(self, request, pk=None):
    """List concepts visible to this user, optionally searched and embedded."""
    params = self.get_params(request)

    order = ['-category__order' if params['order'] == 'desc'
             else 'category__order']

    queryset = self.get_queryset(request)

    # For privileged users, check if any filters are applied, otherwise
    # only allow for published objects.
    if not can_change_concept(request.user) or not params['unpublished']:
        queryset = queryset.published()

    # If Haystack is installed, perform the search
    if params['query'] and OPTIONAL_DEPS['haystack']:
        usage.log('search', model=self.model, request=request, data={
            'query': params['query'],
        })
        results = self.model.objects.search(
            params['query'], queryset=queryset,
            max_results=params['limit'], partial=True)
        objects = (hit.object for hit in results)
    else:
        if params['sort'] == 'name':
            order.append('-name' if params['order'] == 'desc' else 'name')

        # We need to order before a possible slice is taken because
        # querysets cannot be ordered post-slice.
        queryset = queryset.order_by(*order)

        if params['limit']:
            queryset = queryset[:params['limit']]

        objects = queryset

    # When embedding, drop concepts that contain orphaned fields.
    if self.checks_for_orphans and params['embed']:
        keep = [obj.pk for obj in objects if not has_orphaned_field(obj)]
        objects = self.model.objects.filter(pk__in=keep)

    return self.prepare(request, objects, **params)
def get(self, request, pk, item_pk=None):
    """Return vocab item values with pagination metadata and links."""
    instance = request.instance
    params = self.get_params(request)

    if params['random']:
        return self.get_random_values(request, instance, params, item_pk)

    page = params['page']
    limit = params['limit']

    # If a query term is supplied, perform the icontains search
    if params['query']:
        usage.log('values', instance=instance, request=request, data={
            'query': params['query'],
            'item_pk': item_pk,
        })
        values = self.get_search_values(request, instance, params, item_pk)
    else:
        values = self.get_all_values(request, instance, params, item_pk)

    # No page specified, return everything
    if page is None:
        return values

    paginator = self.get_paginator(values, limit=limit)
    page = paginator.page(page)

    kwargs = {'pk': pk}
    if item_pk:
        kwargs['item_pk'] = item_pk

    path = reverse('vocab:items', kwargs=kwargs)
    links = self.get_page_links(request, path, page, extra=params)
    links['parent'] = {
        'href': request.build_absolute_uri(
            reverse('serrano:field', kwargs={'pk': pk}))
    }

    return {
        'values': page.object_list,
        'limit': paginator.per_page,
        'num_pages': paginator.num_pages,
        'page_num': page.number,
        '_links': links,
    }
def get(self, request, pk):
    """Return field values as a sample, search result, or paginated page."""
    instance = self.get_object(request, pk=pk)
    params = self.get_params(request)

    if params['random']:
        return self.get_random_values(request, instance, params['random'])

    page_num = params['page']
    limit = params['limit']

    if params['query']:
        # Query term supplied; perform the icontains search.
        usage.log('values', instance=instance, request=request, data={
            'query': params['query'],
        })
        values = self.get_search_values(request, instance, params['query'])
    else:
        values = self.get_all_values(request, instance)

    # With no page requested, everything is returned.
    if page_num is None:
        return values

    paginator = self.get_paginator(values, limit=limit)
    page = paginator.page(page_num)

    resp = self.get_page_response(request, paginator, page)

    # Augment the response with navigation links.
    path = reverse('serrano:field-values', kwargs={'pk': pk})
    links = self.get_page_links(request, path, page, extra=params)
    links['parent'] = {
        'href': request.build_absolute_uri(
            reverse('serrano:field', kwargs={'pk': pk})),
    }

    resp.update({
        '_links': links,
        'values': page.object_list,
    })

    return resp
def get(self, request, pk):
    """Return per-type statistics, honoring the STATS_CAPABLE setting."""
    instance = self.get_object(request, pk=pk)

    stats_capable = settings.STATS_CAPABLE
    if stats_capable and not stats_capable(instance):
        data = {
            'message': 'This field does not support stats reporting.'
        }
        return self.render(
            request, data, status=codes.unprocessable_entity)

    params = self.get_params(request)

    context = self.get_context(request) if params['aware'] else None

    QueryProcessor = pipeline.query_processors[params['processor']]
    processor = QueryProcessor(context=context, tree=instance.model)
    queryset = processor.get_queryset(request=request)

    if instance.simple_type == 'number':
        resp = {
            'max': instance.max(queryset=queryset),
            'min': instance.min(queryset=queryset),
            'avg': instance.avg(queryset=queryset)
        }
    elif instance.simple_type in ('date', 'time', 'datetime'):
        resp = {
            'max': instance.max(queryset=queryset),
            'min': instance.min(queryset=queryset)
        }
    else:
        resp = {
            'count': instance.count(queryset=queryset, distinct=True)
        }

    usage.log('stats', instance=instance, request=request)
    return resp
def get(self, request, pk):
    """Return the field's value distribution with display labels."""
    instance = self.get_object(request, pk=pk)

    if is_field_orphaned(instance):
        data = {
            'message': 'Orphaned fields do not support distribution calls.'
        }
        return self.render(request, data,
                           status=codes.unprocessable_entity)

    params = self.get_params(request)

    context = self.get_context(request) if params['aware'] else None

    QueryProcessor = pipeline.query_processors[params['processor']]
    processor = QueryProcessor(context=context, tree=instance.model)
    queryset = processor.get_queryset(request=request)

    # Get the value/label mapping to augment the result for display
    value_labels = instance.value_labels(queryset=queryset)

    result = []
    for value, count in instance.dist(queryset=queryset):
        label = (value_labels[value] if value in value_labels
                 else smart_unicode(value))
        result.append({
            'value': value,
            'label': label,
            'count': count,
        })

    usage.log('dist', instance=instance, request=request)
    return result
def post(self, request):
    """Create a context and mark the session modified."""
    params = self.get_params(request)
    form = ContextForm(request, request.data)

    if form.is_valid():
        instance = form.save()
        usage.log('create', instance=instance, request=request)
        request.session.modified = True
        data = self.prepare(request, instance, tree=params['tree'])
        return self.render(request, data, status=codes.created)

    data = {
        'message': 'Error creating context',
        'errors': dict(form.errors),
    }
    return self.render(request, data,
                       status=codes.unprocessable_entity)
def get(self, request, pk=None):
    """List concepts; search via Haystack when installed and queried."""
    params = self.get_params(request)

    order = ['-category__order' if params['order'] == 'desc'
             else 'category__order']

    queryset = self.get_queryset(request, params)

    # If Haystack is installed, perform the search
    if params['query'] and OPTIONAL_DEPS['haystack']:
        usage.log('search', model=self.model, request=request, data={
            'query': params['query'],
        })

        queryset = self.model.objects.search(
            params['query'], queryset=queryset,
            max_results=params['limit'], partial=True)

        if self.checks_for_orphans:
            queryset = self._get_non_orphans(queryset)

        # Search results wrap the model instances; unwrap them.
        objects = (hit.object for hit in queryset)
    else:
        if params['sort'] == 'name':
            order.append('-name' if params['order'] == 'desc' else 'name')

        # We need to order before a possible slice is taken because
        # querysets cannot be ordered post-slice.
        queryset = queryset.order_by(*order)

        if self.checks_for_orphans:
            queryset = self._get_non_orphans(queryset)

        if params['limit']:
            queryset = queryset[:params['limit']]

        objects = queryset

    return self.prepare(request, objects, **params)
def get(self, request, pk):
    """Return type-appropriate statistics if the field supports them."""
    instance = self.get_object(request, pk=pk)

    stats_capable = settings.STATS_CAPABLE
    if stats_capable and not stats_capable(instance):
        data = {'message': 'This field does not support stats reporting.'}
        return self.render(request, data,
                           status=codes.unprocessable_entity)

    params = self.get_params(request)

    context = self.get_context(request) if params['aware'] else None

    QueryProcessor = pipeline.query_processors[params['processor']]
    processor = QueryProcessor(context=context, tree=instance.model)
    queryset = processor.get_queryset(request=request)

    simple_type = instance.simple_type

    if simple_type == 'number':
        resp = {
            'max': instance.max(queryset=queryset),
            'min': instance.min(queryset=queryset),
            'avg': instance.avg(queryset=queryset)
        }
    elif simple_type in ('date', 'time', 'datetime'):
        resp = {
            'max': instance.max(queryset=queryset),
            'min': instance.min(queryset=queryset)
        }
    else:
        resp = {'count': instance.count(queryset=queryset, distinct=True)}

    usage.log('stats', instance=instance, request=request)
    return resp
def process(self, request, result_data):
    """Write exported rows into an HTTP response with download headers."""
    export_type = result_data['export_type']
    exporter = result_data['processor'].get_exporter(
        get_exporter_class(export_type))

    page = result_data['page']
    stop_page = result_data['stop_page']

    # Tag the file name with the exported page range.
    if page:
        file_tag = 'p{0}'.format(page)
        if stop_page and stop_page > page:
            file_tag = 'p{0}-{1}'.format(page, stop_page)
    else:
        file_tag = 'all'

    resp = HttpResponse()
    exporter.write(result_data['rows'], buff=resp, request=request)

    filename = '{0}-{1}-data.{2}'.format(
        file_tag, datetime.now(), exporter.file_extension)

    # The cookie lets clients detect that the download has started.
    cookie_name = settings.EXPORT_COOKIE_NAME_TEMPLATE.format(export_type)
    resp.set_cookie(cookie_name, settings.EXPORT_COOKIE_DATA)

    resp['Content-Disposition'] = 'attachment; filename="{0}"'\
        .format(filename)
    resp['Content-Type'] = exporter.content_type

    usage.log('export', request=request, data={
        'type': export_type,
        'partial': page is not None,
    })

    return resp
def put(self, request, **kwargs):
    """Update a context, marking the session modified on success."""
    params = self.get_params(request)
    tree = params['tree']
    processor = params['processor']

    instance = self.get_object(request, **kwargs)
    form = ContextForm(request, request.data, instance=instance,
                       processor=processor, tree=tree)

    if form.is_valid():
        instance = form.save()
        usage.log('update', instance=instance, request=request)
        request.session.modified = True
        data = self.prepare(request, instance, tree=params['tree'])
        return self.render(request, data)

    data = {
        'message': 'Error updating context',
        'errors': dict(form.errors),
    }
    return self.render(request, data,
                       status=codes.unprocessable_entity)
def test_model(self):
    """Logging with a model class should record its content type."""
    # ``async`` became a reserved word in Python 3.7, so the original
    # ``async=False`` keyword is a syntax error there; pass it via
    # **kwargs to keep the identical call valid on all interpreters.
    usage.log('test', model=DataField, **{'async': False})
    event = Log.objects.all()[0]
    self.assertEqual(event.content_type.model_class(), DataField)
def get(self, request, pk):
    """Return field values with sampling, search, and pagination support."""
    instance = self.get_object(request, pk=pk)

    if is_field_orphaned(instance):
        data = {'message': 'Orphaned fields do not support values calls.'}
        return self.render(request, data,
                           status=codes.unprocessable_entity)

    params = self.get_params(request)

    context = self.get_context(request) if params['aware'] else None

    QueryProcessor = pipeline.query_processors[params['processor']]
    processor = QueryProcessor(tree=instance.model, context=context)
    queryset = processor.get_queryset(request=request)

    if params['random']:
        # In the case that the queryset contains a population smaller
        # than the number of random items being requested, a ValueError
        # will be triggered. Instead of passing the error on to the
        # client, we simply return all the possible values.
        try:
            return self.get_random_values(
                request, instance, params['random'], queryset)
        except ValueError:
            return instance.values(queryset=queryset)

    page_num = params['page']
    limit = params['limit']

    if params['query']:
        # A query term was supplied; perform the icontains search.
        usage.log('items', instance=instance, request=request, data={
            'query': params['query'],
        })
        values = self.get_search_values(
            request, instance, params['query'], queryset)
    else:
        values = self.get_all_values(request, instance, queryset)

    # No page specified, return everything.
    if page_num is None:
        return values

    paginator = self.get_paginator(values, limit=limit)
    page = paginator.page(page_num)

    # Paginator-based response with the page items attached.
    data = self.get_page_response(request, paginator, page)
    data.update({
        'items': page.object_list,
    })

    # Attach pagination links and link templates.
    path = reverse('serrano:field-values', kwargs={'pk': pk})
    links = self.get_page_links(request, path, page, extra=params)
    templates = self.get_link_templates(request)
    response = self.render(request, content=data)
    return patch_response(request, response, links, templates)
def post(self, request, pk):
    """Validate an array of {value, label} items against a field's data.

    Each item is validated by value (preferred) or label; the response
    echoes the input augmented with a ``valid`` flag and the resolved
    counterpart (label for values, value for labels).
    """
    instance = self.get_object(request, pk=pk)
    params = self.get_params(request)

    if not request.data:
        data = {
            'message': 'Error parsing data',
        }
        return self.render(request, data,
                           status=codes.unprocessable_entity)

    if isinstance(request.data, dict):
        array = [request.data]
    else:
        array = request.data

    values = []
    labels = []
    array_map = {}

    # Separate out the values and labels for the lookup. Track indexes
    # to maintain the order of the array.
    for i, datum in enumerate(array):
        # Value takes precedence over label if supplied
        if 'value' in datum:
            array_map[i] = 'value'
            values.append(datum['value'])
        elif 'label' in datum:
            array_map[i] = 'label'
            labels.append(datum['label'])
        else:
            data = {
                # Fixed typo in the client-facing message ("lable").
                'message': 'Error parsing value or label'
            }
            return self.render(request, data,
                               status=codes.unprocessable_entity)

    value_field_name = instance.field_name
    label_field_name = instance.label_field.name

    # Note, this returns a context-aware or naive queryset depending
    # on params. Get the value and label fields so they can be filled
    # in below.
    queryset = self.get_base_values(request, instance, params)\
        .values_list(value_field_name, label_field_name)

    lookup = Q()

    # Validate based on the label
    if labels:
        lookup |= Q(**{'{0}__in'.format(label_field_name): labels})

    if values:
        lookup |= Q(**{'{0}__in'.format(value_field_name): values})

    results = queryset.filter(lookup)

    value_labels = dict(results)
    label_values = dict([(v, k) for k, v in value_labels.items()])

    for i, datum in enumerate(array):
        if array_map[i] == 'label':
            valid = datum['label'] in label_values
            if valid:
                value = label_values[datum['label']]
            else:
                value = datum['label']
            datum['valid'] = valid
            datum['value'] = value
        else:
            valid = datum['value'] in value_labels
            if valid:
                label = value_labels[datum['value']]
            else:
                label = smart_unicode(datum['value'])
            datum['valid'] = valid
            datum['label'] = label

    usage.log('validate', instance=instance, request=request, data={
        'count': len(array),
    })

    # Return the augmented data
    return request.data
def get(self, request, pk):
    """Return a count distribution of the tree's root model grouped by
    one or more dimension fields, with optional k-means clustering and
    outlier detection for all-numeric dimensions.

    Response shape: {'data': [...], 'outliers': [...],
    'clustered': bool, 'size': int}. Returns a 422 response when the
    number of observations exceeds MAXIMUM_OBSERVATIONS.
    """
    instance = self.get_object(request, pk=pk)

    params = self.get_params(request)

    tree = trees[params.get('tree')]
    opts = tree.root_model._meta

    # Synthetic field wrapping the root model's primary key; used for
    # the count annotation and value labeling below.
    tree_field = DataField(pk='{0}:{1}'.format(params.get('tree'), pk),
                           app_name=opts.app_label,
                           model_name=opts.module_name,
                           field_name=opts.pk.name)

    # This will eventually make its way in the parametizer, but lists
    # are not supported.
    dimensions = request.GET.getlist('dimensions')

    if params['aware']:
        context = self.get_context(request)
    else:
        context = None

    QueryProcessor = pipeline.query_processors[params['processor']]
    processor = QueryProcessor(context=context, tree=tree)
    queryset = processor.get_queryset(request=request)

    # Explicit fields to group by, ignore ones that don't exist or the
    # user does not have permission to view. Default is to group by the
    # reference field for distinct counts.
    if any(dimensions):
        fields = []
        groupby = []

        # Renamed loop variable so it no longer shadows the `pk`
        # argument of this method.
        for dim_pk in dimensions:
            f = self.get_object(request, pk=dim_pk)

            if f:
                fields.append(f)
                groupby.append(
                    tree.query_string_for_field(f.field, model=f.model))
    else:
        fields = [instance]
        groupby = [
            tree.query_string_for_field(instance.field,
                                        model=instance.model)
        ]

    # Exclude null values. Depending on the downstream use of the data,
    # nulls may or may not be desirable.
    if not params['nulls']:
        q = Q()

        for field in groupby:
            q = q & Q(**{'{0}__isnull'.format(field): False})

        queryset = queryset.filter(q)

    queryset = queryset.values(*groupby)

    # Begin constructing the response
    resp = {
        'data': [],
        'outliers': [],
        'clustered': False,
        'size': 0,
    }

    queryset = queryset.annotate(count=Count(tree_field.field.name))\
        .values_list('count', *groupby)

    # Evaluate list of points
    length = len(queryset)

    # Nothing to do
    if not length:
        usage.log('dims', instance=instance, request=request, data={
            'size': 0,
            'clustered': False,
            'aware': params['aware'],
        })
        return resp

    if length > MAXIMUM_OBSERVATIONS:
        data = {
            'message': 'Data too large',
        }
        return self.render(request, data,
                           status=codes.unprocessable_entity)

    # Apply ordering. If any of the fields are enumerable, ordering
    # should be relative to those fields. For continuous data, the
    # ordering is relative to the count of each group.
    if (any([d.enumerable for d in fields]) and
            not params['sort'] == 'count'):
        queryset = queryset.order_by(*groupby)
    else:
        queryset = queryset.order_by('-count')

    clustered = False

    # BUGFIX: values_list yields tuples, but the Decimal-to-float
    # conversion below assigns back by index, which raises TypeError on
    # a tuple. Materialize 'values' as a mutable list instead.
    points = [{
        'count': point[0],
        'values': list(point[1:]),
    } for point in list(queryset)]

    outliers = []

    # For N-dimensional continuous data, check if clustering should
    # occur to down-sample the data.
    if all([d.simple_type == 'number' for d in fields]):
        # Extract observations for clustering.
        obs = []
        null_points = []
        numeric_points = []

        for point in points:
            # We need to handle points that have null dimensions
            # differently than those that are all numeric as the kmeans
            # module currently cannot handle mixed type dimensions so we
            # only allow fully numeric points to be passed to the kmeans
            # module.
            if None in point['values']:
                null_points.append(point)
                continue

            # Coerce Decimals to floats for the kmeans module. The loop
            # index is renamed so it no longer shadows the (previously
            # reused) outer index.
            for j, dim in enumerate(point['values']):
                if isinstance(dim, Decimal):
                    point['values'][j] = float(str(dim))

            numeric_points.append(point)
            obs.append(point['values'])

        # Perform k-means clustering. Determine centroids and calculate
        # the weighted count relatives to the centroid and observations
        # within the kmeans module.
        if params['cluster'] and length >= MINIMUM_OBSERVATIONS:
            clustered = True
            counts = [p['count'] for p in numeric_points]
            points, outliers = kmeans.weighted_counts(
                obs, counts, params['n'])
        else:
            indexes = kmeans.find_outliers(obs, normalized=False)

            outliers = []
            for idx in indexes:
                outliers.append(numeric_points[idx])
                numeric_points[idx] = None
            points = [p for p in numeric_points if p is not None]

        # Now that we have done the analysis using the purely numeric
        # points, we can add the mixed/null dimensionality points back
        # in to the list before returning results.
        points += null_points

    usage.log('dims', instance=instance, request=request, data={
        'size': length,
        'clustered': clustered,
        'aware': params['aware'],
    })

    labeled_points = []
    value_labels = tree_field.value_labels(queryset=queryset)

    # Augment each raw value with its display label, falling back to
    # the unicode representation of the value.
    for point in points:
        labeled_points.append({
            'count': point['count'],
            'values': [{
                'label': value_labels.get(value, smart_unicode(value)),
                'value': value
            } for value in point['values']]
        })

    return {
        'data': labeled_points,
        'clustered': clustered,
        'outliers': outliers,
        'size': length,
    }
def post(self, request, pk):
    """Validate submitted value/label documents against this field.

    A single dict or a list of dicts may be posted, each carrying a
    'value' and/or 'label' key. Every document is annotated in place
    with 'valid' and the resolved complementary key, then the payload
    is echoed back. Parse failures yield a 422 response.
    """
    instance = self.get_object(request, pk=pk)
    params = self.get_params(request)

    if not request.data:
        return self.render(request, {'message': 'Error parsing data'},
                           status=codes.unprocessable_entity)

    # Normalize single documents into a one-element list so both
    # shapes share the code path below.
    if isinstance(request.data, dict):
        array = [request.data]
    else:
        array = request.data

    values, labels = [], []
    array_map = {}

    # Partition documents by which key was supplied, remembering each
    # document's index so order is preserved. 'value' wins when both
    # keys are present.
    for idx, datum in enumerate(array):
        if 'value' in datum:
            array_map[idx] = 'value'
            values.append(datum['value'])
        elif 'label' in datum:
            array_map[idx] = 'label'
            labels.append(datum['label'])
        else:
            return self.render(
                request, {'message': 'Error parsing value or label'},
                status=codes.unprocessable_entity)

    value_field_name = instance.field_name
    label_field_name = instance.label_field.name

    # Context-aware or naive queryset depending on params, restricted
    # to the value/label columns needed for the lookups below.
    base = self.get_base_values(request, instance, params)
    queryset = base.values_list(value_field_name, label_field_name)

    lookup = Q()

    if labels:
        lookup |= Q(**{'{0}__in'.format(label_field_name): labels})
    if values:
        lookup |= Q(**{'{0}__in'.format(value_field_name): values})

    # Forward (value -> label) and reverse (label -> value) maps.
    value_labels = dict(queryset.filter(lookup))
    label_values = dict((v, k) for k, v in value_labels.items())

    # Annotate each document in place. Unmatched labels fall back to
    # themselves; unmatched values fall back to their unicode form.
    for idx, datum in enumerate(array):
        if array_map[idx] == 'label':
            label = datum['label']
            datum['valid'] = label in label_values
            datum['value'] = label_values.get(label, label)
        else:
            value = datum['value']
            datum['valid'] = value in value_labels
            if datum['valid']:
                datum['label'] = value_labels[value]
            else:
                datum['label'] = smart_unicode(value)

    usage.log('validate', instance=instance, request=request, data={
        'count': len(array),
    })

    # Return the augmented data.
    return request.data
def get(self, request, pk): instance = self.get_object(request, pk=pk) params = self.get_params(request) if params['aware']: context = self.get_context(request) else: context = None QueryProcessor = pipeline.query_processors[params['processor']] processor = QueryProcessor(tree=instance.model, context=context) queryset = processor.get_queryset(request=request) if params['random']: # In the case that the queryset contains a population smaller than # the number of random items being requested, a ValueError will be # triggered. Instead of passing the error on to the client, we # simply return all the possible values. try: return self.get_random_values( request, instance, params['random'], queryset) except ValueError: return instance.values(queryset=queryset) page = params['page'] limit = params['limit'] # If a query term is supplied, perform the icontains search. if params['query']: usage.log('values', instance=instance, request=request, data={ 'query': params['query'], }) values = self.get_search_values( request, instance, params['query'], queryset) else: values = self.get_all_values(request, instance, queryset) # No page specified, return everything. if page is None: return values paginator = self.get_paginator(values, limit=limit) page = paginator.page(page) # Get paginator-based response. resp = self.get_page_response(request, paginator, page) # Add links. path = reverse('serrano:field-values', kwargs={'pk': pk}) links = self.get_page_links(request, path, page, extra=params) links['parent'] = { 'href': request.build_absolute_uri(reverse('serrano:field', kwargs={'pk': pk})), } resp.update({ '_links': links, 'values': page.object_list, }) return resp
def get(self, request, pk): instance = self.get_object(request, pk=pk) usage.log('read', instance=instance, request=request) return self.prepare(request, instance)
def test_error(self): # Pass non-JSON serializable data usage.log('test', data={'some': TransactionTestCase}, async=False) self.assertEqual(Log.objects.count(), 0) self.assertEqual(len(mock_handler.messages['error']), 1)
def get(self, request, pk):
    """Return a count distribution of the tree's root model grouped by
    one or more dimension fields, with optional k-means clustering and
    outlier detection for all-numeric dimensions.

    Response shape: {'data': [...], 'outliers': [...],
    'clustered': bool, 'size': int}. Returns a 422 response when the
    number of observations exceeds MAXIMUM_OBSERVATIONS.
    """
    instance = self.get_object(request, pk=pk)

    params = self.get_params(request)

    tree = trees[params.get('tree')]
    opts = tree.root_model._meta

    # Synthetic field wrapping the root model's primary key; used for
    # the count aggregation below.
    tree_field = DataField(app_name=opts.app_label,
                           model_name=opts.module_name,
                           field_name=opts.pk.name)

    # This will eventually make it's way in the parametizer, but lists
    # are not supported
    dimensions = request.GET.getlist('dimensions')

    # The `aware` flag toggles the behavior of the distribution by
    # making it relative to the applied context or not.
    if params['aware']:
        attrs = None
    else:
        attrs = {}

    # Get and apply context relative to the tree
    context = self.get_context(request, attrs=attrs)
    queryset = context.apply(tree=tree)

    # Explicit fields to group by, ignore ones that don't exist or the
    # user does not have permission to view. Default is to group by the
    # reference field for distinct counts.
    if any(dimensions):
        fields = []
        groupby = []

        # NOTE(review): this loop variable shadows the `pk` argument;
        # `pk` is not used again below, so it appears harmless here.
        for pk in dimensions:
            f = self.get_object(request, pk=pk)

            if f:
                fields.append(f)
                groupby.append(tree.query_string_for_field(f.field))
    else:
        fields = [instance]
        groupby = [tree.query_string_for_field(instance.field)]

    # Perform a count aggregation of the tree model grouped by the
    # specified dimensions
    stats = tree_field.count(*groupby)

    # Apply it relative to the queryset
    stats = stats.apply(queryset)

    # Exclude null values. Depending on the downstream use of the data,
    # nulls may or may not be desirable.
    if not params['nulls']:
        q = Q()

        for field in groupby:
            q = q | Q(**{field: None})

        stats = stats.exclude(q)

    # Begin constructing the response
    resp = {
        'data': [],
        'outliers': [],
        'clustered': False,
        'size': 0,
    }

    # Evaluate list of points
    length = len(stats)

    # Nothing to do
    if not length:
        usage.log('dist', instance=instance, request=request, data={
            'size': 0,
            'clustered': False,
            'aware': params['aware'],
        })
        return resp

    if length > MAXIMUM_OBSERVATIONS:
        data = {
            'message': 'Data too large',
        }
        return self.render(request, data,
                           status=codes.unprocessable_entity)

    # Apply ordering. If any of the fields are enumerable, ordering
    # should be relative to those fields. For continuous data, the
    # ordering is relative to the count of each group.
    if (any([d.enumerable for d in fields]) and
            not params['sort'] == 'count'):
        stats = stats.order_by(*groupby)
    else:
        stats = stats.order_by('-count')

    clustered = False

    # NOTE(review): each item of `stats` appears to be a mapping with a
    # mutable 'values' sequence and a 'count' key (both are indexed
    # below) — confirm against the avocado stats/count API.
    points = list(stats)
    outliers = []

    # For N-dimensional continuous data, check if clustering should
    # occur to down-sample the data.
    if all([d.simple_type == 'number' for d in fields]):
        # Extract observations for clustering, coercing Decimals to
        # floats along the way (the kmeans module works on floats).
        obs = []

        for point in points:
            for i, dim in enumerate(point['values']):
                if isinstance(dim, Decimal):
                    point['values'][i] = float(str(dim))
            obs.append(point['values'])

        # Perform k-means clustering. Determine centroids and calculate
        # the weighted count relatives to the centroid and observations
        # within the kmeans module.
        if params['cluster'] and length >= MINIMUM_OBSERVATIONS:
            clustered = True
            counts = [p['count'] for p in points]
            points, outliers = kmeans.weighted_counts(
                obs, counts, params['n'])
        else:
            # No clustering: only strip outliers from the point list.
            indexes = kmeans.find_outliers(obs, normalized=False)

            outliers = []
            for idx in indexes:
                outliers.append(points[idx])
                points[idx] = None
            points = [p for p in points if p is not None]

    usage.log('dist', instance=instance, request=request, data={
        'size': length,
        'clustered': clustered,
        'aware': params['aware'],
    })

    return {
        'data': points,
        'clustered': clustered,
        'outliers': outliers,
        'size': length,
    }
def test_data(self): usage.log('test', data={'some': 'data'}, async=False) event = Log.objects.all()[0] self.assertEqual(event.data, {'some': 'data'})