Example #1
File: views.py Project: Fjoerfoks/zamboni
    def get(self, request, pk, metric):
        if metric not in APP_STATS:
            raise http.Http404('No metric by that name.')

        app = self.get_object()

        stat = APP_STATS[metric]

        # Perform form validation.
        form = StatsForm(request.GET)
        if not form.is_valid():
            exc = ParseError()
            exc.detail = {'detail': dict(form.errors.items())}
            raise exc

        qs = form.cleaned_data

        dimensions = {'app-id': app.id}

        if 'dimensions' in stat:
            for key, default in stat['dimensions'].items():
                val = request.GET.get(key, default)
                if val is not None:
                    # Skip None values so no facet filter is applied for
                    # that dimension in the monolith client.
                    dimensions[key] = val

        return Response(_get_monolith_data(stat, qs.get('start'),
                                           qs.get('end'), qs.get('interval'),
                                           dimensions))
Example #2
 def get(self, request):
     form = Form(request.GET)
     if not form.is_valid():
         exc = ParseError()
         exc.detail = {'detail': dict(form.errors.items())}
         raise exc
     return Response({"study": "hello-world"})
Example #3
  def rfid(self, request, pk):
    """
    Set a user's RFID (privileged operation).
    ---

    serializer: api.serializers.RFIDSerializer
    """
    u = self.get_object()

    serializer = RFIDSerializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    rfid = serializer.validated_data['rfid']

    u.rfid = rfid

    # save() raises IntegrityError on a duplicate RFID; without this guard
    # it would surface as a 500 server error.
    try:
      u.save()
    except IntegrityError:
      error = ParseError(detail="RFID already belongs to another member.")
      error.errno = DUPLICATE
      raise error

    api_request = create_api_request(request, serializer)
    api_request.user = u
    api_request.save()

    return Response({
      "api_request": api_request.id
    })
Example #4
File: views.py Project: mozilla/crashathon
 def get(self, request):
     form = StatsForm(request.GET)
     if not form.is_valid():
         exc = ParseError()
         exc.detail = {'detail': dict(form.errors.items())}
         raise exc
     ids_and_counts = collect_id_counts(**form.cleaned_data)
     return Response({"crashes": ids_and_counts})
Example #5
 def validate(self, data):
     try:
         self.instance = LangPack.from_upload(data['upload'],
                                              instance=self.instance)
     except forms.ValidationError as e:
         exc = ParseError()
         exc.detail = {u'detail': e.messages}
         raise exc
     # DRF's validate() must return the validated data.
     return data
Example #6
 def list(self, request, *args, **kwargs):
     if not request.query_params:
         raise ParseError(detail="Missing required fields")
     return super(ItemList, self).list(request, *args, **kwargs)
Example #7
    def posts(self, request, pk=None):
        # uname = request.user
        # uid = uname.id
        uid = getAuthorIdForApiRequest(request)
        if uid is None:
            raise ParseError(
                "No correct X-User header or authentication were provided.")
        uid = str(uid).replace('-', '')

        viewable_posts = []

        try:
            uname = request.user
            uid = uname.id

            # ------------- set queries by Tolu ----------------------
            userUser = CustomUser.objects.filter(pk=uid)[0].id
            hostHost = CustomUser.objects.filter(pk=uid)[0].host
            option1 = Post.objects.filter(author=userUser)
            authorized_posts = PostAuthorizedAuthor.objects.filter(
                authorized_author=userUser).values_list('post_id', flat=True)
            option2 = Post.objects.filter(pk__in=authorized_posts)
            friendZone = Friendship.objects.filter(
                friend_a=userUser).values_list('friend_b', flat=True)
            fofriendZone = Friendship.objects.filter(
                friend_a__in=friendZone).values_list('friend_b', flat=True)
            option3 = Post.objects.filter(
                Q(author__in=friendZone) & Q(privacy_setting=3)
                | Q(author__in=friendZone) & Q(privacy_setting=4))
            option4 = Post.objects.filter(
                Q(author__in=fofriendZone) & Q(privacy_setting=4))
            option5 = Post.objects.filter(
                Q(author__in=friendZone) & Q(privacy_setting=5)
                & Q(original_host=hostHost))
            option6 = Post.objects.filter(Q(privacy_setting=6))
            unlistedPosts = Post.objects.filter(
                Q(is_unlisted=True) & ~Q(author=userUser))
            allPosts = option1.union(option2, option3, option4, option5,
                                     option6)
            if unlistedPosts.exists():
                viewable_posts = allPosts.difference(unlistedPosts).order_by(
                    '-published')
            else:
                viewable_posts = allPosts.order_by('-published')
        except Exception:
            # Fall back to an empty post list if any query above fails.
            pass

        allowed_posts = viewable_posts

        request_user_id = getAuthorIdForApiRequest(request)
        if request_user_id is None:
            raise ParseError(
                "No correct X-User header or authentication were provided.")

        paginator = PostsPagination()
        paginated_posts = paginator.paginate_queryset(allowed_posts, request)
        serialized_posts = PostSerializer(paginated_posts, many=True)

        response = OrderedDict()
        response.update({"query": "posts"})
        response.update({"count": len(allowed_posts)})
        response.update({"size": Services.get_page_size(request, paginator)})
        response.update({"next": None})
        response.update({"previous": None})

        posts = []

        for post in serialized_posts.data:
            # Get single post information
            postId = str(post["id"])
            posts.append(getPostData(request, pk=postId))

        response.update({"posts": posts})

        if paginator.get_next_link() is not None:
            response["next"] = paginator.get_next_link()
        if paginator.get_previous_link() is not None:
            response["previous"] = paginator.get_previous_link()
        return Response(response)
Example #8
    def save_project(self, validated_data, existing_project_instance):
        """save project.

        :param validated_data:
        :param existing_project_instance:
        :return: :raise ParseError:
        """

        parent_project = validated_data.pop('parent_project', None)
        if parent_project:
            raise ParseError(
                'Projects with a parent_project value set are archived, '
                'read-only records and cannot be updated.')

        # Project Question responses
        proj_question_responses_data = validated_data.pop(
            'project_question_responses', None)

        # Supported institutions
        institutions_data = validated_data.pop('institutions', None)

        # Publications
        publications_data = validated_data.pop('publications', None)

        # Grants
        grants_data = validated_data.pop('grants', None)

        # Project identifiers
        proj_identifiers_data = validated_data.pop('project_ids', None)

        # Project contacts
        proj_contacts_data = validated_data.pop('project_contacts', None)

        # Domains
        domains_data = validated_data.pop('domains', None)

        # Requests
        # Do not delete this pop: 'requests' must be discarded from
        # validated_data; the request data is read from self.initial_data.
        validated_data.pop('requests', None)

        # Project
        current_user = self.context['request'].user
        validated_data['updated_by'] = current_user
        existing_request_instances = {}
        provision_details_exists = False
        if existing_project_instance:
            provision_details_exists = existing_project_instance.\
                linked_provisiondetails.exists()
            validated_data['created_by'] = existing_project_instance.created_by
            for existing_request_instance in \
                    existing_project_instance.requests.all():
                existing_request_instances[
                    existing_request_instance.id] = existing_request_instance
        else:
            validated_data['created_by'] = current_user

        project = Project.objects.create(**validated_data)
        pd_context = utilitySerializers.\
            ProvisionDetailsSerializer.show_error_msg_context()
        if provision_details_exists:
            # note: Provision details are updated at provision time
            # so we are re-using provision_details, instead of a deep copy
            for ppd in existing_project_instance.\
                    linked_provisiondetails.all():
                temp = utilitySerializers.\
                    ProvisionDetailsSerializer(ppd.provision_details,
                                               context=pd_context)
                pd_s = utilitySerializers.\
                    ProvisionDetailsSerializer(data=temp.data)
                pd_s.is_valid(raise_exception=True)
                ProjectProvisionDetails.objects.create(
                    project=project,
                    provision_details=pd_s.save())

        if proj_question_responses_data:
            for proj_question_response_data in proj_question_responses_data:
                proj_question_resp_serializer = \
                    ProjectQuestionResponseSerializer(
                        data=proj_question_response_data)
                proj_question_resp_serializer.is_valid(raise_exception=True)
                proj_question_resp_serializer.save(project=project)

        if institutions_data:
            for supported_inst in institutions_data:
                supported_inst_serializer = SupportedInstitutionSerializer(
                    data=supported_inst)
                supported_inst_serializer.is_valid(raise_exception=True)
                supported_inst_serializer.save(project=project)

        if publications_data:
            for pub_data in publications_data:
                pub_serializer = PublicationSerializer(data=pub_data)
                pub_serializer.is_valid(raise_exception=True)
                pub_serializer.save(project=project)

        if grants_data:
            for grant_data in grants_data:
                grant_serializer = GrantSerializer(data=grant_data)
                grant_serializer.is_valid(raise_exception=True)
                grant_serializer.save(project=project)

        if proj_identifiers_data:
            for p_id_data in proj_identifiers_data:
                proj_id_serializer = p_id_data.get('serializer')
                if proj_id_serializer:
                    proj_id_serializer.save(project=project)

        if proj_contacts_data:
            for proj_contact_data in proj_contacts_data:
                project_contacts_serializer = ProjectContactSerializer(
                    data=proj_contact_data)
                project_contacts_serializer.is_valid(raise_exception=True)
                project_contacts_serializer.save(project=project)

        if domains_data:
            for domain_data in domains_data:
                domain_serializer = DomainSerializer(data=domain_data)
                domain_serializer.is_valid(raise_exception=True)
                domain_serializer.save(project=project)

        # Do not use the parsed validatedData, it does not contain request_id
        requests = self.initial_data.pop('requests', None)
        if requests:
            context = {}
            context['request'] = self.context['request']
            for requestData in requests:
                parent_request = requestData.pop('parent_request', None)
                if parent_request:
                    # These are archived requests, should not be updated.
                    continue

                # will be set later with new project
                requestData.pop('project', None)
                request_id = requestData.pop('id', None)
                if request_id:
                    existing_request_instance = existing_request_instances.pop(
                        request_id, None)
                    if existing_request_instance:
                        request_serializer = \
                            requestSerializers.CramsRequestSerializer(
                                existing_request_instance,
                                data=requestData,
                                context=context)
                    else:
                        raise ParseError(
                            'Project/Request mismatch, cannot find request' +
                            ' with id {}'.format(repr(request_id)))
                else:
                    request_serializer = \
                        requestSerializers.CramsRequestSerializer(
                            data=requestData, context=context)

                request_serializer.is_valid(raise_exception=True)

                request_serializer.save(project=project)

            # copy remaining requests across
            # request_status is read_only, hence cannot be passed in updateData
            context[OVERRIDE_READONLY_DATA] = {CLONE: True}
            for idKey in existing_request_instances:
                remaining_instance = existing_request_instances[idKey]
                if not remaining_instance.parent_request:
                    request_serializer = \
                        requestSerializers.CramsRequestSerializer(
                            remaining_instance, data={},
                            partial=True, context=context)
                    request_serializer.is_valid(raise_exception=True)
                    request_serializer.save(project=project)

        return project
Example #9
    def post(self, request, *args, **kwargs):

        # serializer only really checks that data received was in the correct
        # format.
        serializer = WorkspaceResourceAddSerializer(data=request.data)
        if serializer.is_valid():
            workspace_uuid = kwargs['workspace_pk']
            resource_uuid = str(serializer.validated_data['resource_uuid'])
            logger.info('Adding resource ({resource_uuid}) to'
                        ' workspace ({workspace_uuid})'.format(
                            workspace_uuid=str(workspace_uuid),
                            resource_uuid=resource_uuid))

            try:
                workspace, resource = get_workspace_and_resource(
                    workspace_uuid, resource_uuid)
            except ParseError:
                # Let DRF's exception handler turn this into a 400 response.
                raise
            except Exception as ex:
                return Response(
                    {
                        'resource_uuid':
                        'The owner of the workspace and '
                        'resource must be the same.'
                    },
                    status=status.HTTP_400_BAD_REQUEST)

            if not resource.is_active:
                logger.info(
                    'Attempted to add an inactive Resource {resource} to'
                    ' a workspace.'.format(resource=resource))
                raise ParseError(
                    'The requested Resource'
                    ' is not currently activated, possibly due to pending'
                    ' validation.')

            if resource.resource_type is None:
                logger.info('Attempted to add a Resource {resource} without'
                            ' a validated type to a workspace.'.format(
                                resource=resource))
                raise ParseError('The requested Resource'
                                 ' has not been successfully validated.')

            # if here, workspace and resource have the same owner.
            # Now check if the requester is either that same owner
            # or an admin
            requesting_user = request.user
            if (requesting_user.is_staff) or (requesting_user
                                              == workspace.owner):
                try:
                    current_workspaces = resource.workspaces.all()
                    if workspace in current_workspaces:
                        return Response(status=status.HTTP_204_NO_CONTENT)
                    else:
                        resource.workspaces.add(workspace)
                        resource.save()
                        rs = ResourceSerializer(resource,
                                                context={'request': request})
                        return Response(rs.data,
                                        status=status.HTTP_201_CREATED)
                except Exception as ex:
                    logger.error(
                        'An exception was raised when adding a resource'
                        ' {resource_uuid} to workspace {workspace_uuid}.  Exception was:'
                        ' {ex}. \nSee related logs.'.format(
                            workspace_uuid=str(workspace_uuid),
                            resource_uuid=str(resource_uuid),
                            ex=ex))
                    return Response(
                        {}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
            else:
                return Response(
                    {
                        'resource_uuid':
                        'The owner of the workspace and '
                        'resource must match the requesting user or be'
                        ' requested by admin.'
                    },
                    status=status.HTTP_400_BAD_REQUEST)
        else:
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
Example #10
 def check_user_group(self, request):
     if request.data.get(
             'env') == self.env_prd and not request.user.is_superuser:
         if not request.user.groups.exists():
             raise ParseError(self.not_exists_group)
         return request.user.groups.first().id
Example #11
 def to_internal_value(self, data):
     try:
         return Specialty.objects.get(name=data)
     except Specialty.DoesNotExist:
         raise ParseError("Specialty doesn't exist")
Example #12
 def from_native(self, data):
     if type(data) is not list:
         raise ParseError("expected a list of data")
     return {'tags': data}
Example #13
 def get(self, request, *args, **kwargs):
     if not self.queries:
         raise ParseError(
             "You need to provide a non-empty query (q-parameter)")
     return super(AggregateResource, self).get(request, *args, **kwargs)
Example #14
    def authenticate(self, request):
        """
        Function for authentication against VoIPGRID api.
        """
        if settings.TESTING:
            return (AnonymousUser, None)

        # Get auth headers.
        auth = get_authorization_header(request)

        if not auth:
            # Raises 'Authentication credentials were not provided'.
            raise NotAuthenticated(detail=None)

        # Serialize data to check for sip_user_id.
        serializer = SipUserIdSerializer(data=request.data)
        if not serializer.is_valid(raise_exception=False):
            logger.info(
                'BAD REQUEST! Authentication failed due to invalid sip_user_id in data:\n\n{0}'
                .format(request.data))
            # This raises a bad request response.
            raise ParseError(detail=None)

        # Get sip_user_id.
        sip_user_id = serializer.validated_data['sip_user_id']

        # Create new headers carrying the original auth data.
        headers = {'Authorization': auth}

        # Get user profile.
        response = requests.get(settings.VG_API_USER_URL, headers=headers)
        # Check status code.
        self._check_status_code(response.status_code)

        # Parse to json.
        json_response = response.json()

        # Get app account reference on systemuser.
        app_account_url = json_response['app_account']

        if not app_account_url:
            # Has no app account and thus no access to api.
            logger.info('No app account for systemuser {0} - {1}'.format(
                json_response['id'],
                json_response['email'],
            ))
            raise PermissionDenied(detail=None)

        # Get url for app account.
        app_account_api_url = settings.VG_API_BASE_URL + app_account_url

        # Get app account.
        response = requests.get(app_account_api_url, headers=headers)
        # Check status code.
        self._check_status_code(response.status_code)
        # Get account id.
        account_id = response.json()['account_id']

        # Compare account id to sip user id the request is meant for.
        if str(sip_user_id) != str(account_id):
            # Raise permissions denied.
            raise PermissionDenied(detail=None)

        # All good.
        return (AnonymousUser, None)
Example #15
    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parses the incoming bytestream as JSON and returns the resulting data
        """
        result = super(JSONParser, self).parse(stream,
                                               media_type=media_type,
                                               parser_context=parser_context)

        if not isinstance(result, dict) or 'data' not in result:
            raise ParseError('Received document does not contain primary data')

        data = result.get('data')
        view = parser_context['view']

        from rest_framework_json_api.views import RelationshipView
        if isinstance(view, RelationshipView):
            # For a RelationshipView the payload is a JSONAPI Resource
            # Identifier Object (or a list of them), not a regular Resource
            # Object, so skip full attribute/relationship parsing.
            if isinstance(data, list):
                for resource_identifier_object in data:
                    if not (resource_identifier_object.get('id')
                            and resource_identifier_object.get('type')):
                        raise ParseError(
                            'Received data contains one or more malformed JSONAPI '
                            'Resource Identifier Object(s)')
            elif not (data.get('id') and data.get('type')):
                raise ParseError(
                    'Received data is not a valid JSONAPI Resource Identifier Object'
                )

            return data

        request = parser_context.get('request')

        # Check for inconsistencies
        if request.method in ('PUT', 'POST', 'PATCH'):
            resource_name = utils.get_resource_name(
                parser_context, expand_polymorphic_types=True)
            if isinstance(resource_name, six.string_types):
                if data.get('type') != resource_name:
                    raise exceptions.Conflict(
                        "The resource object's type ({data_type}) is not the type that "
                        "constitute the collection represented by the endpoint "
                        "({resource_type}).".format(
                            data_type=data.get('type'),
                            resource_type=resource_name))
            else:
                if data.get('type') not in resource_name:
                    raise exceptions.Conflict(
                        "The resource object's type ({data_type}) is not the type that "
                        "constitute the collection represented by the endpoint "
                        "(one of [{resource_types}]).".format(
                            data_type=data.get('type'),
                            resource_types=", ".join(resource_name)))
        if not data.get('id') and request.method in ('PATCH', 'PUT'):
            raise ParseError(
                "The resource identifier object must contain an 'id' member")

        # Construct the return data
        serializer_class = getattr(view, 'serializer_class', None)
        parsed_data = {'id': data.get('id')} if 'id' in data else {}
        # The 'type' member is only passed through for polymorphic serializers.
        if serializer_class is not None:
            if issubclass(serializer_class,
                          serializers.PolymorphicModelSerializer):
                parsed_data['type'] = data.get('type')
        parsed_data.update(self.parse_attributes(data))
        parsed_data.update(self.parse_relationships(data))
        parsed_data.update(self.parse_metadata(result))
        return parsed_data
Example #16
    def get(self, request, organization):
        if not self.has_feature(organization, request):
            return Response(status=404)

        with sentry_sdk.start_span(op="discover.endpoint",
                                   description="parse params"):
            try:
                params = self.get_snuba_params(request, organization)
            except NoProjects:
                return Response([])

            vitals = [
                vital.lower() for vital in request.GET.getlist("vital", [])
            ]
            if len(vitals) == 0:
                raise ParseError(detail="Need to pass at least one vital")

            selected_columns = []
            aliases = {}
            for vital in vitals:
                if vital not in self.VITALS:
                    raise ParseError(detail=f"{vital} is not a valid vital")
                aliases[vital] = []
                for index, threshold in enumerate(
                        self.VITALS[vital]["thresholds"]):
                    column = f"count_at_least({vital}, {threshold})"
                    # Order aliases for later calculation
                    aliases[vital].append(get_function_alias(column))
                    selected_columns.append(column)
                selected_columns.append(f"p75({vital})")

        with self.handle_query_errors():
            events_results = discover.query(
                selected_columns=selected_columns,
                query=request.GET.get("query"),
                params=params,
                # This query should only ever return a single row
                limit=1,
                referrer="api.events.vitals",
                auto_fields=True,
                auto_aggregations=True,
                use_aggregate_conditions=True,
            )

        results = {}
        if len(events_results["data"]) == 1:
            event_data = events_results["data"][0]
            for vital in vitals:
                groups = len(aliases[vital])
                results[vital] = {}
                total = 0

                # Go backwards so that we can subtract and get the running total
                for i in range(groups - 1, -1, -1):
                    count = event_data[aliases[vital][i]]
                    group_count = 0 if count is None else count - total
                    results[vital][self.LABELS[i]] = group_count
                    total += group_count

                results[vital]["total"] = total
                results[vital]["p75"] = event_data.get(
                    get_function_alias(f"p75({vital})"))

        return Response(results)
Example #17
File: base.py Project: lissyx/zamboni
def form_errors(forms):
    errors = _collect_form_errors(forms)
    raise ParseError(errors)
Example #18
    def modify(self, request, *args, **kwargs):

        xform = self.get_object()
        http_status = status.HTTP_200_OK
        response = {}

        if request.user.has_perm("validate_xform", xform):

            owner = xform.user
            userform_id = "{}_{}".format(owner.username, xform.id_string)
            query = {
                ParsedInstance.USERFORM_ID: userform_id
            }  # Query used for MongoDB
            filter_ = {"xform_id": xform.id}  # Filter for Django ORM
            payload = {}

            try:
                payload = json.loads(request.data.get("payload", "{}"))
            except ValueError:
                http_status = status.HTTP_400_BAD_REQUEST
                response = {"detail": _("Invalid payload")}

            if http_status == status.HTTP_200_OK:

                new_validation_status_uid = payload.get(
                    "validation_status.uid")

                if new_validation_status_uid is None:
                    http_status = status.HTTP_400_BAD_REQUEST
                    response = {
                        "detail": _("No validation_status.uid provided")
                    }
                else:
                    # Create new validation_status object
                    new_validation_status = get_validation_status(
                        new_validation_status_uid, xform,
                        request.user.username)

                    # 3 scenarios to update submissions

                    # First scenario / Modify submissions based on user's query
                    if payload.get("query"):
                        # Validate if query is valid.
                        try:
                            query.update(payload.get("query"))
                        except ValueError:
                            raise ParseError(
                                _("Invalid query: %(query)s" %
                                  {'query': json.dumps(payload.get("query"))}))

                        query_kwargs = {
                            "query": json.dumps(query),
                            "fields": '["_id"]'
                        }

                        cursor = ParsedInstance.query_mongo_no_paging(
                            **query_kwargs)
                        submissions_ids = [
                            record.get("_id") for record in list(cursor)
                        ]
                        filter_.update({"id__in": submissions_ids})

                    # Second scenario / Modify submissions based on list of ids
                    elif payload.get("submissions_ids"):
                        try:
                            # Use int() to test if list of integers is valid.
                            submissions_ids = payload.get(
                                "submissions_ids", [])
                            or_ = {
                                u"$or": [{
                                    u"_id": int(submission_id)
                                } for submission_id in submissions_ids]
                            }
                            query.update(or_)
                        except ValueError:
                            raise ParseError(
                                _(
                                    "Invalid submissions ids: %(submissions_ids)s"
                                    % {
                                        'submissions_ids':
                                        json.dumps(
                                            payload.get("submissions_ids"))
                                    }))

                        filter_.update({"id__in": submissions_ids})
                    # Third scenario / Modify all submissions in the form;
                    # requires an explicit confirmation param in the payload
                    elif payload.get("confirm", False) is not True:
                        http_status = status.HTTP_400_BAD_REQUEST
                        response = {"detail": _("No confirmations provided")}

                    # If everything is OK, submit data to DBs
                    if http_status == status.HTTP_200_OK:
                        # Update Postgres & Mongo
                        updated_records_count = Instance.objects.\
                            filter(**filter_).update(validation_status=new_validation_status)
                        ParsedInstance.bulk_update_validation_statuses(
                            query, new_validation_status)
                        response = {
                            "detail":
                            _("{} submissions have been updated").format(
                                updated_records_count)
                        }

            return Response(response, http_status)

        else:
            raise PermissionDenied(_(u"You do not have validate permissions."))
Example #19
    def finish(self):
        if self.status != AUCTION_STATUS_OPEN:
            raise ParseError('Only open auctions can be finished')

        self._do_finishing_process()
Example #20
    def get(self, request, organization):
        """
        List an Organization's Issues
        `````````````````````````````

        Return a list of issues (groups) bound to an organization.  All parameters are
        supplied as query string parameters.

        A default query of ``is:unresolved`` is applied. To return results
        with other statuses, send a new query value (e.g. ``?query=`` for all
        results).

        The ``groupStatsPeriod`` parameter can be used to select the timeline
        stats which should be present. Possible values are: '' (disable),
        '24h', '14d'

        The ``statsPeriod`` parameter can be used to select a date window starting
        from now. Ex. ``14d``.

        The ``start`` and ``end`` parameters can be used to select an absolute
        date period to fetch issues from.

        :qparam string statsPeriod: an optional stat period (can be one of
                                    ``"24h"``, ``"14d"``, and ``""``).
        :qparam string groupStatsPeriod: an optional stat period (can be one of
                                    ``"24h"``, ``"14d"``, and ``""``).
        :qparam string start:       Beginning date. You must also provide ``end``.
        :qparam string end:         End date. You must also provide ``start``.
        :qparam bool shortIdLookup: if this is set to true then short IDs are
                                    looked up by this function as well.  This
                                    can cause the return value of the function
                                    to return an event issue of a different
                                    project which is why this is an opt-in.
                                    Set to `1` to enable.
        :qparam querystring query: an optional Sentry structured search
                                   query.  If not provided, an implied
                                   ``"is:unresolved"`` is assumed.
        :pparam string organization_slug: the slug of the organization the
                                          issues belong to.
        :auth: required
        :qparam list expand: an optional list of strings to opt in to additional data. Supports `inbox`
        :qparam list collapse: an optional list of strings to opt out of certain pieces of data. Supports `stats`, `lifetime`, `base`
        """
        stats_period = request.GET.get("groupStatsPeriod")
        try:
            start, end = get_date_range_from_params(request.GET)
        except InvalidParams as e:
            raise ParseError(detail=six.text_type(e))

        expand = request.GET.getlist("expand", [])
        collapse = request.GET.getlist("collapse", [])
        has_inbox = features.has("organizations:inbox", organization, actor=request.user)
        has_workflow_owners = features.has(
            "organizations:workflow-owners", organization, actor=request.user
        )
        if stats_period not in (None, "", "24h", "14d", "auto"):
            return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
        stats_period, stats_period_start, stats_period_end = calculate_stats_period(
            stats_period, start, end
        )

        environments = self.get_environments(request, organization)

        serializer = functools.partial(
            StreamGroupSerializerSnuba,
            environment_ids=[env.id for env in environments],
            stats_period=stats_period,
            stats_period_start=stats_period_start,
            stats_period_end=stats_period_end,
            expand=expand,
            collapse=collapse,
            has_inbox=has_inbox,
            has_workflow_owners=has_workflow_owners,
        )

        projects = self.get_projects(request, organization)
        project_ids = [p.id for p in projects]

        if not projects:
            return Response([])

        if len(projects) > 1 and not features.has(
            "organizations:global-views", organization, actor=request.user
        ):
            return Response(
                {"detail": "You do not have the multi project stream feature enabled"}, status=400
            )

        # we ignore date range for both short id and event ids
        query = request.GET.get("query", "").strip()
        if query:
            # check to see if we've got an event ID
            event_id = normalize_event_id(query)
            if event_id:
                # For a direct hit lookup we want to use any passed project ids
                # (we've already checked permissions on these) plus any other
                # projects that the user is a member of. This gives us a better
                # chance of returning the correct result, even if the wrong
                # project is selected.
                direct_hit_projects = set(project_ids) | set(
                    [project.id for project in request.access.projects]
                )
                groups = list(Group.objects.filter_by_event_id(direct_hit_projects, event_id))
                if len(groups) == 1:
                    response = Response(
                        serialize(groups, request.user, serializer(matching_event_id=event_id))
                    )
                    response["X-Sentry-Direct-Hit"] = "1"
                    return response

                if groups:
                    return Response(serialize(groups, request.user, serializer()))

            group = get_by_short_id(organization.id, request.GET.get("shortIdLookup"), query)
            if group is not None:
                # check all projects user has access to
                if request.access.has_project_access(group.project):
                    response = Response(serialize([group], request.user, serializer()))
                    response["X-Sentry-Direct-Hit"] = "1"
                    return response

        # If group ids specified, just ignore any query components
        try:
            group_ids = set(map(int, request.GET.getlist("group")))
        except ValueError:
            return Response({"detail": "Group ids must be integers"}, status=400)

        if group_ids:
            groups = list(Group.objects.filter(id__in=group_ids, project_id__in=project_ids))
            if any(g for g in groups if not request.access.has_project_access(g.project)):
                raise PermissionDenied
            return Response(serialize(groups, request.user, serializer()))

        try:
            cursor_result, query_kwargs = self._search(
                request,
                organization,
                projects,
                environments,
                {"count_hits": True, "date_to": end, "date_from": start},
            )
        except (ValidationError, discover.InvalidSearchQuery) as exc:
            return Response({"detail": six.text_type(exc)}, status=400)

        results = list(cursor_result)

        context = serialize(
            results,
            request.user,
            serializer(
                start=start,
                end=end,
                search_filters=query_kwargs["search_filters"]
                if "search_filters" in query_kwargs
                else None,
            ),
        )

        # HACK: remove auto resolved entries
        # TODO: We should try to integrate this into the search backend, since
        # this can cause us to arbitrarily return fewer results than requested.
        status = [
            search_filter
            for search_filter in query_kwargs.get("search_filters", [])
            if search_filter.key.name == "status"
        ]
        if status and status[0].value.raw_value == GroupStatus.UNRESOLVED:
            context = [r for r in context if "status" not in r or r["status"] == "unresolved"]

        response = Response(context)

        self.add_cursor_headers(request, response, cursor_result)

        # TODO(jess): add metrics that are similar to project endpoint here
        return response
Example #21
def build_chart_data_for_field(xform,
                               field,
                               language_index=0,
                               choices=None,
                               group_by=None,
                               data_view=None):
    # check if it's the special _submission_time META
    if isinstance(field, basestring) and field == common_tags.SUBMISSION_TIME:
        field_label = 'Submission Time'
        field_xpath = '_submission_time'
        field_type = 'datetime'
    else:
        # TODO: merge choices with results and set 0's on any missing fields,
        # i.e. they didn't have responses

        field_label = get_field_label(field, language_index)
        field_xpath = field.get_abbreviated_xpath()
        field_type = field.type

    data_type = DATA_TYPE_MAP.get(field_type, 'categorized')
    field_name = field.name if not isinstance(field, basestring) else field

    if group_by:
        group_by_name = group_by.get_abbreviated_xpath() \
            if not isinstance(group_by, basestring) else group_by

        if field_type == common_tags.SELECT_ONE \
                and group_by.type == common_tags.SELECT_ONE:
            result = get_form_submissions_grouped_by_select_one(
                xform, field_xpath, group_by_name, field_name, data_view)

            result = _flatten_multiple_dict_into_one(field_name, group_by_name,
                                                     result)

        elif field_type in common_tags.NUMERIC_LIST \
                and group_by.type == common_tags.SELECT_ONE:
            result = get_form_submissions_aggregated_by_select_one(
                xform, field_xpath, field_name, group_by_name, data_view)
        else:
            raise ParseError(u'Cannot group by %s' % group_by_name)
    else:
        result = get_form_submissions_grouped_by_field(xform, field_xpath,
                                                       field_name, data_view)

    result = _use_labels_from_field_name(field_name,
                                         field,
                                         data_type,
                                         result,
                                         choices=choices)

    if group_by:
        group_by_data_type = DATA_TYPE_MAP.get(group_by.type, 'categorized')
        grp_choices = get_field_choices(group_by, xform)
        result = _use_labels_from_group_by_name(group_by_name,
                                                group_by,
                                                group_by_data_type,
                                                result,
                                                choices=grp_choices)

    if not group_by:
        result = sorted(result, key=lambda d: d['count'])

    # for date fields, strip out None values
    if data_type == 'time_based':
        result = [r for r in result if r.get(field_name) is not None]
        # for each result, check if it matches the timezone regexp and convert it for JS
        for r in result:
            if timezone_re.match(r[field_name]):
                try:
                    r[field_name] = utc_time_string_for_javascript(
                        r[field_name])
                except ValueError:
                    pass

    return {
        'data': result,
        'data_type': data_type,
        'field_label': field_label,
        'field_xpath': field_xpath,
        'field_name': field_name,
        'field_type': field_type,
        'grouped_by': group_by_name if group_by else None
    }
Example #22
def error400(request):
    raise ParseError('Bad request')
Example #23
 def check_db(self, request_data):
     db = request_data.get('db')
     if not Dbconf.objects.filter(id=db).exists():
         # Pass the message string itself, not a set literal, as the detail.
         raise ParseError(self.not_exists_target_db)
Example #24
    def parse(self, stream, media_type=None, parser_context=None):
        """
        Treats the incoming bytestream as a raw file upload and returns
        a `DataAndFiles` object.

        `.data` will be None (we expect the request body to be the file content).
        `.files` will be a `QueryDict` containing one 'file' element.
        """
        parser_context = parser_context or {}
        request = parser_context['request']
        encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)
        meta = request.META
        upload_handlers = request.upload_handlers
        filename = self.get_filename(stream, media_type, parser_context)

        if not filename:
            raise ParseError(self.errors['no_filename'])

        # Note that this code is extracted from Django's handling of
        # file uploads in MultiPartParser.
        content_type = meta.get('HTTP_CONTENT_TYPE',
                                meta.get('CONTENT_TYPE', ''))
        try:
            content_length = int(
                meta.get('HTTP_CONTENT_LENGTH', meta.get('CONTENT_LENGTH', 0)))
        except (ValueError, TypeError):
            content_length = None

        # See if the handler will want to take care of the parsing.
        for handler in upload_handlers:
            result = handler.handle_raw_input(stream, meta, content_length,
                                              None, encoding)
            if result is not None:
                return DataAndFiles({}, {'file': result[1]})

        # This is the standard case.
        possible_sizes = [
            x.chunk_size for x in upload_handlers if x.chunk_size
        ]
        chunk_size = min([2**31 - 4] + possible_sizes)
        chunks = ChunkIter(stream, chunk_size)
        counters = [0] * len(upload_handlers)

        for index, handler in enumerate(upload_handlers):
            try:
                handler.new_file(None, filename, content_type, content_length,
                                 encoding)
            except StopFutureHandlers:
                upload_handlers = upload_handlers[:index + 1]
                break

        for chunk in chunks:
            for index, handler in enumerate(upload_handlers):
                chunk_length = len(chunk)
                chunk = handler.receive_data_chunk(chunk, counters[index])
                counters[index] += chunk_length
                if chunk is None:
                    break

        for index, handler in enumerate(upload_handlers):
            file_obj = handler.file_complete(counters[index])
            if file_obj is not None:
                return DataAndFiles({}, {'file': file_obj})

        raise ParseError(self.errors['unhandled'])
Example #25
    def create(self, request):
        '''
        Creates a new person record and links it to another person
        needs from_person_id, relation_type, name, gender, birth_year and address
        '''

        queryset = Person.objects.filter(family_id=request.user.family_id)

        from_person_id, from_person_id_valid = intTryParse(
            request.data.get("from_person_id"))
        if not from_person_id_valid:
            raise ParseError('Invalid from_person_id')

        from_person = get_object_or_404(queryset, pk=from_person_id)

        relation_type, relation_type_valid = intTryParse(
            request.data.get("relation_type"))
        if not relation_type_valid or relation_type not in (PARTNERED, RAISED,
                                                            RAISED_BY):
            raise ParseError('Invalid relation_type')

        name = request.data.get("name")
        if not name or len(name.strip()) == 0:
            raise ParseError('Invalid name')

        gender = request.data.get("gender")
        if gender not in (MALE, FEMALE, OTHER, NON_BINARY, PREFER_NOT_TO_SAY):
            raise ParseError('Invalid gender')

        birth_year, birth_year_valid = intTryParse(
            request.POST.get("birth_year"))
        if not birth_year_valid:
            birth_year = 0

        with reversion.create_revision():
            new_person = Person(name=name.strip(),
                                gender=gender,
                                family_id=from_person.family_id,
                                birth_year=birth_year)

            address = request.data.get("address")
            if address:
                new_person.address = address

            # Hierarchy scores will eventually be deprecated
            if relation_type == PARTNERED:
                new_person.hierarchy_score = from_person.hierarchy_score
            elif relation_type == RAISED:
                new_person.hierarchy_score = from_person.hierarchy_score + 1
            elif relation_type == RAISED_BY:
                new_person.hierarchy_score = from_person.hierarchy_score - 1
            new_person.save()

            # Store some meta-information.
            reversion.set_user(request.user)
            reversion.set_comment('Create ' +
                                  request.META.get('HTTP_X_REAL_IP', ''))

            relation = create_relation(request.user, from_person, new_person,
                                       relation_type)
            relation_serializer = RelationSerializer(relation)

            person_serializer = PersonSerializer(new_person)
            return Response({
                'person': person_serializer.data,
                'relation': relation_serializer.data
            })
Example #26
    def resolve_trend_columns(
        self,
        query: TrendQueryBuilder,
        baseline_function: str,
        column: str,
        middle: str,
    ) -> TrendColumns:
        """Construct the columns needed to calculate high confidence trends

        This is the snql version of get_trend_columns, which should be replaced
        once we're migrated
        """
        if baseline_function not in self.snql_trend_columns:
            raise ParseError(detail=f"{baseline_function} is not a supported trend function")

        aggregate_column = self.snql_trend_columns[baseline_function]
        aggregate_range_1 = query.resolve_function(
            aggregate_column.format(column=column, condition="greater", boundary=middle),
            overwrite_alias="aggregate_range_1",
        )
        aggregate_range_2 = query.resolve_function(
            aggregate_column.format(
                column=column,
                condition="lessOrEquals",
                boundary=middle,
            ),
            overwrite_alias="aggregate_range_2",
        )

        count_column = self.snql_trend_columns["count_range"]
        count_range_1 = query.resolve_function(
            count_column.format(condition="greater", boundary=middle),
            overwrite_alias="count_range_1",
        )
        count_range_2 = query.resolve_function(
            count_column.format(condition="lessOrEquals", boundary=middle),
            overwrite_alias="count_range_2",
        )

        variance_column = self.snql_trend_columns["variance"]
        variance_range_1 = query.resolve_function(
            variance_column.format(condition="greater", boundary=middle),
            overwrite_alias="variance_range_1",
        )
        variance_range_2 = query.resolve_function(
            variance_column.format(condition="lessOrEquals", boundary=middle),
            overwrite_alias="variance_range_2",
        )
        # Only add average when it's not the baseline
        if baseline_function != "avg":
            avg_column = self.snql_trend_columns["avg"]
            avg_range_1 = query.resolve_function(
                avg_column.format(
                    column=column,
                    condition="greater",
                    boundary=middle,
                )
            )
            avg_range_2 = query.resolve_function(
                avg_column.format(
                    column=column,
                    condition="lessOrEquals",
                    boundary=middle,
                )
            )
        # avg will be added as the baseline
        else:
            avg_range_1 = aggregate_range_1
            avg_range_2 = aggregate_range_2

        t_test = query.resolve_division(
            Function("minus", [avg_range_1, avg_range_2]),
            Function(
                "sqrt",
                [
                    Function(
                        "plus",
                        [
                            Function(
                                "divide",
                                [
                                    variance_range_1,
                                    count_range_1,
                                ],
                            ),
                            Function(
                                "divide",
                                [
                                    variance_range_2,
                                    count_range_2,
                                ],
                            ),
                        ],
                    ),
                ],
            ),
            "t_test",
        )
        trend_percentage = query.resolve_division(
            aggregate_range_2, aggregate_range_1, "trend_percentage"
        )
        trend_difference = Function(
            "minus",
            [
                aggregate_range_2,
                aggregate_range_1,
            ],
            "trend_difference",
        )
        count_percentage = query.resolve_division(count_range_2, count_range_1, "count_percentage")
        return {
            "aggregate_range_1": aggregate_range_1,
            "aggregate_range_2": aggregate_range_2,
            "count_range_1": count_range_1,
            "count_range_2": count_range_2,
            "t_test": t_test,
            "trend_percentage": trend_percentage,
            "trend_difference": trend_difference,
            "count_percentage": count_percentage,
        }
Example #27
    def handle_webhook(self):
        """
        Handle GitHub webhook events.

        It checks for all the events we support currently:

        - PUSH: Triggered on a push to a repository branch. Branch pushes and repository tag pushes
          also trigger webhook push events.

          .. note::

            ``created`` and ``deleted`` indicate whether the push created or deleted a branch/tag.
            This is required for old webhooks created at Read the Docs that do not register the
            ``create`` and ``delete`` events.

            Newer webhooks created on Read the Docs will trigger a PUSH+created=True **and** a
            CREATE event. We need to handle this in a specific way to not trigger the sync twice.

        - CREATE: Represents a created branch or tag.

        - DELETE: Represents a deleted branch or tag.

        - PULL_REQUEST: Triggered when a pull request is assigned, unassigned, labeled, unlabeled,
          opened, edited, closed, reopened, synchronize, ready_for_review, locked, unlocked or when
          a pull request review is requested or removed (``action`` will contain this data)

        See https://developer.github.com/v3/activity/events/types/

        """
        # Get event and trigger other webhook events
        action = self.data.get('action', None)
        created = self.data.get('created', False)
        deleted = self.data.get('deleted', False)
        event = self.request.META.get(GITHUB_EVENT_HEADER, GITHUB_PUSH)
        webhook_github.send(
            Project,
            project=self.project,
            data=self.data,
            event=event,
        )

        # Sync versions when a branch/tag was created/deleted
        if event in (GITHUB_CREATE, GITHUB_DELETE):
            log.info('Triggered sync_versions: project=%s event=%s',
                     self.project, event)
            return self.sync_versions_response(self.project)

        # Handle pull request events
        if all([
                self.project.has_feature(Feature.EXTERNAL_VERSION_BUILD),
                self.project.external_builds_enabled,
                event == GITHUB_PULL_REQUEST,
                action,
        ]):
            if (action in [
                    GITHUB_PULL_REQUEST_OPENED, GITHUB_PULL_REQUEST_REOPENED,
                    GITHUB_PULL_REQUEST_SYNC
            ]):
                # Trigger a build when PR is opened/reopened/sync
                return self.get_external_version_response(self.project)

            if action == GITHUB_PULL_REQUEST_CLOSED:
                # Delete external version when PR is closed
                return self.get_delete_external_version_response(self.project)

        # Sync versions when push event is created/deleted action
        if all([
                event == GITHUB_PUSH,
                (created or deleted),
        ]):
            integration = self.get_integration()
            events = integration.provider_data.get('events', [])
            if any([
                    GITHUB_CREATE in events,
                    GITHUB_DELETE in events,
            ]):
                # Newer webhooks send PUSH **and** CREATE/DELETE events on a
                # creation/deletion. On a PUSH event, check whether the webhook is
                # already subscribed to CREATE/DELETE; if so, skip the sync here so
                # it is not triggered twice.
                return self.sync_versions_response(self.project, sync=False)

            log.info('Triggered sync_versions: project=%s events=%s',
                     self.project, events)
            return self.sync_versions_response(self.project)

        # Trigger a build for all branches in the push
        if event == GITHUB_PUSH:
            try:
                branches = [self._normalize_ref(self.data['ref'])]
                return self.get_response_push(self.project, branches)
            except KeyError:
                raise ParseError('Parameter "ref" is required')

        return None
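
The PUSH/CREATE/DELETE de-duplication above can be distilled into a small pure function. The sketch below is for illustration only and is not Read the Docs code; the lowercase event names stand in for the GITHUB_* constants:

def should_sync_versions(event, created, deleted, subscribed_events):
    """Decide whether this webhook delivery should trigger a version sync.

    Newer webhooks send a PUSH (with created/deleted flags) *and* a
    separate CREATE/DELETE event, so sync only once per change.
    """
    if event in ('create', 'delete'):
        return True
    if event == 'push' and (created or deleted):
        # If the webhook is also subscribed to create/delete, let those
        # events trigger the sync instead of the push.
        return not ({'create', 'delete'} & set(subscribed_events))
    return False
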
Example #28
 def save(self, *args, **kwargs):
     if not self.band and not self.artist:
         raise ParseError(detail='Band or Artist should not be empty',
                          code=status.HTTP_400_BAD_REQUEST)
     super().save(*args, **kwargs)
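
Note that DRF's ParseError already maps to HTTP 400, and its ``code`` argument expects a machine-readable error-code string rather than an HTTP status. A slightly more idiomatic version of the same guard might look like this ('band_or_artist_required' is an invented label, not from the original project):

 def save(self, *args, **kwargs):
     if not self.band and not self.artist:
         # ParseError responds with HTTP 400 on its own; `code` is a
         # label for introspection, not a status code.
         raise ParseError(detail='Band or Artist should not be empty',
                          code='band_or_artist_required')
     super().save(*args, **kwargs)
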
Example #29
    def userPostComments(self, request, pk=None):
        # NOTE: assigning permission_classes inside a method has no effect;
        # it must be declared on the view class for DRF to enforce it.
        permission_classes = (IsAuthenticated, )
        post_id = pk

        # Does the post exist?
        try:
            requested_post = Post.objects.get(id=post_id)
        except Post.DoesNotExist:
            response = {
                'query': 'addComment',
                'success': False,
                'message': "Comment not allowed"
            }
            return Response(response, status=403)

        if request.method == "POST":
            # check that we're allowed to see the post - for now just check if the posts are public
            # for right now, just return comments from public posts
            # should we check if post visibility is serveronly/private?
            request_user_id = getAuthorIdForApiRequest(request)
            if request_user_id is None:
                raise ParseError(
                    "No valid X-User header or authentication was provided."
                )

            if Services.has_permission_to_see_post(request_user_id,
                                                   requested_post):
                body = json.loads(request.body.decode('utf-8'))

                if Services.addComment(body, post_id):

                    response = {
                        'query': 'addComment',
                        'success': True,
                        'message': "Comment Added"
                    }
                    return Response(response, status=200)
                else:
                    response = {
                        'query': 'addComment',
                        'success': False,
                        'message': "Comment not allowed"
                    }
                    return Response(response, status=403)

        elif request.method == "GET":
            # check that we're allowed to see the post - for now just check if the posts are public
            # for right now, just return comments from public posts
            paginator = PostsPagination()
            # if requested_post.privacy_setting == "6":
            request_user_id = getAuthorIdForApiRequest(request)
            if request_user_id is None:
                raise ParseError(
                    "No valid X-User header or authentication was provided."
                )

            if Services.has_permission_to_see_post(request_user_id,
                                                   requested_post):
                queryset = Comment.objects.filter(
                    post=pk).order_by('-datetime')
                comments = CommentSerializer(queryset, many=True).data
                comments_response = []

                for comment in comments:
                    comments_response.append(
                        getCommentData(request, pk=comment["id"]))

                paginated_comments = paginator.paginate_queryset(
                    comments_response, request)

                response = OrderedDict()
                response.update({"query": "comments"})
                response.update({"count": len(queryset)})
                response.update(
                    {"size": Services.get_page_size(request, paginator)})
                response.update({"next": None})
                response.update({"previous": None})
                response.update({"comments": paginated_comments})

                if paginator.get_next_link() is not None:
                    response["next"] = paginator.get_next_link()
                if paginator.get_previous_link() is not None:
                    response["previous"] = paginator.get_previous_link()

                return Response(response)
            else:
                raise PermissionDenied(
                    "Forbidden: You don't have permission to access comments for this post or you provided an invalid user."
                )
        else:
            raise MethodNotAllowed(method=request.method)
Example #30
File: views.py Project: logan0709/see
 def check_approve_status(self, instance):
     step_instance = instance.step_set.all()[1]
     if step_instance.status != 0:
         raise ParseError(self.approve_warning)
Example #31
    def userPosts(self, request, pk=None):
        author_id = self.kwargs['pk']
        author_id = str(author_id)
        uid = getAuthorIdForApiRequest(request)
        if uid is None:
            raise ParseError(
                "No valid X-User header or authentication was provided.")
        uid = str(uid)

        # allowed_posts = Post.objects.raw(' \
        # WITH posts AS (SELECT id FROM API_post WHERE author_id in  \
        # (SELECT f2.friend_a_id AS fofid \
        #     FROM API_friendship f \
        #     JOIN API_friendship f2 ON f.friend_a_id = f2.friend_b_id \
        #     WHERE fofid NOT IN (SELECT friend_a_ID FROM API_friendship  \
        #     WHERE friend_b_id = %s) AND f.friend_b_id = %s AND fofid != %s) AND privacy_setting = 4 \
        # UNION \
        #     SELECT id FROM API_post WHERE (author_id in  \
        #     (WITH friends(fid) AS (SELECT friend_b_id FROM API_friendship WHERE friend_a_id=%s) \
        #     SELECT * FROM friends WHERE fid != %s GROUP BY fid)  \
        #     AND (privacy_setting = 3 OR privacy_setting = 4)) OR author_id = %s OR  privacy_setting = 6) \
        #     SELECT * FROM API_post WHERE id in posts \
        #     AND author_id = %s \
        #     ORDER BY published DESC', [str(uid)]*6 + [author_id])

        # Instead of this big raw query, just fetch all of the author's posts,
        # then check per post whether the user has permission to see each one.
        allowed_posts = []
        allPosts = Post.objects.filter(author=author_id).order_by(
            '-published')
        for post in allPosts:
            if Services.has_permission_to_see_post(uid, post):
                allowed_posts.append(post)

        paginator = PostsPagination()
        paginated_posts = paginator.paginate_queryset(allowed_posts, request)
        serialized_posts = PostSerializer(paginated_posts, many=True)

        response = OrderedDict()
        response.update({"query": "posts"})
        response.update({"count": len(allowed_posts)})
        response.update({"size": Services.get_page_size(request, paginator)})
        response.update({"next": None})
        response.update({"previous": None})

        posts = []

        for post in serialized_posts.data:
            # Get single post information
            postId = str(post["id"])
            posts.append(getPostData(request, pk=postId))

        response.update({"posts": posts})

        if paginator.get_next_link() is not None:
            response["next"] = paginator.get_next_link()
        if paginator.get_previous_link() is not None:
            response["previous"] = paginator.get_previous_link()
        return Response(response)
Example #32
 def initial(self, request, *args, **kwargs):
     super().initial(request, *args, **kwargs)
     if not self.has_required_filters:
         raise ParseError()
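
A minimal sketch of a view that could use this hook; the ``has_required_filters`` property and the filter names are assumptions, since the original snippet does not show how they are defined:

 from rest_framework.exceptions import ParseError
 from rest_framework.generics import ListAPIView

 class ReportList(ListAPIView):  # hypothetical view, for illustration only
     required_filters = ('date_from', 'date_to')

     @property
     def has_required_filters(self):
         # True only when every required filter appears in the query string.
         return all(name in self.request.query_params
                    for name in self.required_filters)

     def initial(self, request, *args, **kwargs):
         super().initial(request, *args, **kwargs)
         if not self.has_required_filters:
             raise ParseError(detail='Missing required filter parameters.')
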
Example #33
    def get_trend_columns(self, baseline_function, column, middle):
        """Construct the columns needed to calculate high confidence trends"""
        trend_column = self.trend_columns.get(baseline_function)
        if trend_column is None:
            raise ParseError(detail=f"{baseline_function} is not a supported trend function")

        count_column = self.trend_columns["count_range"]
        percentage_column = self.trend_columns["percentage"]
        variance_column = self.trend_columns["variance"]

        # t_test, and the columns required to calculate it
        t_test_columns = [
            variance_column.format(
                condition="greater", boundary=middle, query_alias="variance_range_1"
            ),
            variance_column.format(
                condition="lessOrEquals", boundary=middle, query_alias="variance_range_2"
            ),
        ]
        # Only add average when it's not the baseline
        if baseline_function != "avg":
            avg_column = self.trend_columns["avg"]
            t_test_columns.extend(
                [
                    avg_column.format(
                        column=column,
                        condition="greater",
                        boundary=middle,
                        query_alias="avg_range_1",
                    ),
                    avg_column.format(
                        column=column,
                        condition="lessOrEquals",
                        boundary=middle,
                        query_alias="avg_range_2",
                    ),
                ]
            )
            avg_alias = "avg_range"
        # avg will be added as the baseline
        else:
            avg_alias = "aggregate_range"

        t_test_columns.append(
            self.trend_columns["t_test"].format(
                avg=avg_alias,
            )
        )

        return t_test_columns + [
            trend_column.format(
                column=column, condition="greater", boundary=middle, query_alias="aggregate_range_1"
            ),
            trend_column.format(
                column=column,
                condition="lessOrEquals",
                boundary=middle,
                query_alias="aggregate_range_2",
            ),
            percentage_column.format(alias="aggregate_range", query_alias="trend_percentage"),
            self.trend_columns["difference"].format(
                alias="aggregate_range", query_alias="trend_difference"
            ),
            count_column.format(condition="greater", boundary=middle, query_alias="count_range_1"),
            count_column.format(
                condition="lessOrEquals", boundary=middle, query_alias="count_range_2"
            ),
            percentage_column.format(alias="count_range", query_alias="count_percentage"),
        ]
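
The self.trend_columns templates themselves are not part of this snippet. As an illustration of the mechanism only (the exact template strings below are assumptions, not the real mapping), an entry could be a plain format string:

    # Hypothetical shape of one trend_columns entry (format-string templating):
    count_column = "countIf({condition}(timestamp, {boundary})) as {query_alias}"
    count_column.format(condition="greater", boundary="middle_ts",
                        query_alias="count_range_1")
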
Example #34
    def _process_request(self, request):
        '''
        Supports retrieving a file on the server side that the user has
        specified by calling addFile on the filepond API and passing a
        URL to a file.
        '''
        LOG.debug('Filepond API: Fetch view GET called...')
        # Retrieve the target URL from the request query string's "target"
        # parameter, pull the file into temp upload storage and return a
        # file object.

        # First check we have a URL and parse to check it's valid
        target_url = request.query_params.get('target', None)
        if not target_url:
            raise ParseError('Required query parameter(s) missing.')

        # Use Django's URL validator to see if we've been given a valid URL
        validator = URLValidator(message=('An invalid URL <%s> has been '
                                          'provided' % (target_url)))
        try:
            validator(target_url)
        except ValidationError as e:
            raise ParseError(str(e))

        # TODO: Should we check the headers returned when we request the
        # download to see that we're getting a file rather than an HTML page?
        # For now this check is enabled on the basis that we assume the
        # target data file will not be HTML. However, there should be a way
        # to turn this off if the client knows that they want an HTML file.
        # TODO: This use of head() can probably be removed: with the new
        # approach of streaming content to a BytesIO object, a request with
        # stream=True opens the connection and fetches only the headers, so
        # the check could be done at that point instead.
        try:
            header = requests.head(target_url, allow_redirects=True)
        except ConnectionError as e:
            msg = ('Unable to access the requested remote file headers: %s' %
                   str(e))
            LOG.error(msg)
            return Response(msg, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        if header.status_code == 404:
            raise NotFound('The remote file was not found.')

        content_type = header.headers.get('Content-Type', '')

        # If the URL has returned URL content but an HTML file was not
        # requested then assume that the URL has linked to a download page or
        # some sort of error page or similar and raise an error.
        if 'html' in content_type.lower() and '.html' not in target_url:
            LOG.error('The requested data seems to be in HTML format. '
                      'Assuming this is not a valid data file.')
            raise ParseError('Provided URL links to HTML content.')

        buf = BytesIO()
        upload_file_name = None
        try:
            with requests.get(target_url, allow_redirects=True,
                              stream=True) as r:
                if 'Content-Disposition' in r.headers:
                    cd = r.headers['Content-Disposition']
                    matches = re.findall('filename=(.+)', cd)
                    if len(matches):
                        upload_file_name = matches[0]
                for chunk in r.iter_content(chunk_size=1048576):
                    buf.write(chunk)
        except ConnectionError as e:
            raise NotFound('Unable to access the requested remote file: %s' %
                           str(e))

        file_id = _get_file_id()
        # If filename wasn't extracted from Content-Disposition header, get
        # from the URL or otherwise set it to the auto-generated file_id
        if not upload_file_name:
            if not target_url.endswith('/'):
                split = target_url.rsplit('/', 1)
                upload_file_name = split[1] if len(split) > 1 else split[0]
            else:
                upload_file_name = file_id

        return (buf, file_id, upload_file_name, content_type)
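
The filename=(.+) pattern above keeps any quotes surrounding the header value. A more defensive extraction might strip them; this helper is a sketch, not part of the original module:

    import re

    def filename_from_content_disposition(cd):
        """Extract a bare filename from a Content-Disposition value."""
        match = re.search(r'filename="?([^";]+)"?', cd)
        return match.group(1) if match else None
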
Example #35
    def get(self, request: Request, organization) -> Response:
        if not self.has_feature(organization, request):
            return Response(status=404)
        use_snql = self.has_snql_feature(organization, request)
        sentry_sdk.set_tag("discover.use_snql", use_snql)

        try:
            params = self.get_snuba_params(request, organization)
        except NoProjects:
            return Response([])

        with sentry_sdk.start_span(op="discover.endpoint", description="trend_dates"):
            middle_date = request.GET.get("middle")
            if middle_date:
                try:
                    middle = parse_datetime_string(middle_date)
                except InvalidQuery:
                    raise ParseError(detail=f"{middle_date} is not a valid date format")
                if middle <= params["start"] or middle >= params["end"]:
                    raise ParseError(
                        detail="The middle date should be within the duration of the query"
                    )
            else:
                middle = params["start"] + timedelta(
                    seconds=(params["end"] - params["start"]).total_seconds() * 0.5
                )
            middle = datetime.strftime(middle, DateArg.date_format)

        trend_type = request.GET.get("trendType", REGRESSION)
        if trend_type not in TREND_TYPES:
            raise ParseError(detail=f"{trend_type} is not a supported trend type")

        trend_function = request.GET.get("trendFunction", "p50()")
        try:
            function, columns, _ = parse_function(trend_function)
        except InvalidSearchQuery as error:
            raise ParseError(detail=error)
        if len(columns) == 0:
            # Default to duration
            column = "transaction.duration"
        else:
            column = columns[0]

        selected_columns = self.get_field_list(organization, request)
        orderby = self.get_orderby(request)
        query = request.GET.get("query")

        if use_snql:
            with self.handle_query_errors():
                trend_query = TrendQueryBuilder(
                    dataset=Dataset.Discover,
                    params=params,
                    selected_columns=selected_columns,
                    auto_fields=False,
                    auto_aggregations=True,
                    use_aggregate_conditions=True,
                )
                snql_trend_columns = self.resolve_trend_columns(
                    trend_query, function, column, middle
                )
                trend_query.columns.extend(snql_trend_columns.values())
                trend_query.aggregates.extend(snql_trend_columns.values())
                trend_query.params["aliases"] = self.get_snql_function_aliases(
                    snql_trend_columns, trend_type
                )
                # Both orderby and conditions need to be resolved after the columns because of aliasing
                trend_query.orderby = trend_query.resolve_orderby(orderby)
                trend_query.groupby = trend_query.resolve_groupby()
                where, having = trend_query.resolve_conditions(query, use_aggregate_conditions=True)
                trend_query.where += where
                trend_query.having += having
        else:
            params["aliases"] = self.get_function_aliases(trend_type)
            trend_columns = self.get_trend_columns(function, column, middle)

        def data_fn(offset, limit):
            if use_snql:
                trend_query.offset = Offset(offset)
                trend_query.limit = Limit(limit)
                result = raw_snql_query(
                    trend_query.get_snql_query(),
                    referrer="api.trends.get-percentage-change.wip-snql",
                )
                result = discover.transform_results(
                    result, trend_query.function_alias_map, {}, None
                )
                return result
            else:
                return discover.query(
                    selected_columns=selected_columns + trend_columns,
                    query=query,
                    params=params,
                    orderby=orderby,
                    offset=offset,
                    limit=limit,
                    referrer="api.trends.get-percentage-change",
                    auto_fields=True,
                    auto_aggregations=True,
                    use_aggregate_conditions=True,
                )

        with self.handle_query_errors():
            return self.paginate(
                request=request,
                paginator=GenericOffsetPaginator(data_fn=data_fn),
                on_results=self.build_result_handler(
                    request,
                    organization,
                    params,
                    trend_function,
                    selected_columns,
                    orderby,
                    query,
                    use_snql,
                ),
                default_per_page=5,
                max_per_page=5,
            )
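
For context, the query parameters this endpoint reads (trendType, trendFunction, middle, query) can be exercised with any HTTP client. Everything below is a hypothetical illustration; the URL, organization slug, and token are placeholders, not real Sentry values:

    import requests

    resp = requests.get(
        "https://sentry.example.com/api/0/organizations/acme/events-trends/",
        headers={"Authorization": "Bearer <token>"},
        params={
            "trendType": "regression",        # validated against TREND_TYPES
            "trendFunction": "p50()",         # parsed with parse_function()
            "middle": "2021-06-01T00:00:00",  # optional; defaults to the midpoint
            "query": "transaction.duration:>0",
        },
    )
    resp.raise_for_status()
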
Example #36
File: base.py Project: Fjoerfoks/zamboni
def form_errors(forms):
    errors = _collect_form_errors(forms)
    exc = ParseError()
    exc.detail = {'detail': errors}
    raise exc
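
The _collect_form_errors helper is not shown here; a plausible implementation would merge the error dicts of every bound form (an assumption, not the zamboni source):

def _collect_form_errors(forms):
    # Assumed helper: merge the error dicts from every invalid form.
    errors = {}
    for form in forms:
        if form.errors:
            errors.update(dict(form.errors.items()))
    return errors

# Inside a view, callers would validate their forms and raise a single
# ParseError carrying all of the collected errors:
#     forms = [StatsForm(request.GET)]
#     if not all(form.is_valid() for form in forms):
#         form_errors(forms)
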