def check_rollback_able(self, instance):
    """Ensure *instance* may be rolled back; raise ParseError otherwise."""
    rollback_allowed = bool(instance.rollback_able)
    if rollback_allowed:
        return
    # The view supplies the user-facing message on `self`.
    raise ParseError(self.not_rollback_able)
def parse(self, stream, media_type=None, parser_context=None):
    """Parse the incoming bytestream as a JSON:API document.

    Validates the top-level ``data`` member, checks the resource ``type``
    against the endpoint's resource name for write methods, and flattens
    id/type/attributes/relationships/meta into a single dict for the
    serializer.

    Raises:
        ParseError: when the document has no primary data, a resource
            identifier is malformed, or ``id`` is missing on PATCH/PUT.
        exceptions.Conflict: when the payload ``type`` does not match the
            endpoint's resource type(s).
    """
    result = super(JSONParser, self).parse(stream, media_type=media_type, parser_context=parser_context)
    if not isinstance(result, dict) or 'data' not in result:
        raise ParseError('Received document does not contain primary data')
    data = result.get('data')
    view = parser_context['view']

    # Imported here (not at module level) — presumably to avoid a circular
    # import; TODO confirm against the module's import graph.
    from rest_framework_json_api.views import RelationshipView
    if isinstance(view, RelationshipView):
        # We skip parsing the object as JSONAPI Resource Identifier Object and not a regular
        # Resource Object
        if isinstance(data, list):
            for resource_identifier_object in data:
                if not (resource_identifier_object.get('id') and resource_identifier_object.get('type')):
                    raise ParseError(
                        'Received data contains one or more malformed JSONAPI '
                        'Resource Identifier Object(s)')
        elif not (data.get('id') and data.get('type')):
            raise ParseError(
                'Received data is not a valid JSONAPI Resource Identifier Object'
            )
        return data

    request = parser_context.get('request')

    # Check for inconsistencies
    if request.method in ('PUT', 'POST', 'PATCH'):
        resource_name = utils.get_resource_name(
            parser_context, expand_polymorphic_types=True)
        if isinstance(resource_name, six.string_types):
            # Single resource type: the payload's type must match exactly.
            if data.get('type') != resource_name:
                raise exceptions.Conflict(
                    "The resource object's type ({data_type}) is not the type that "
                    "constitute the collection represented by the endpoint "
                    "({resource_type}).".format(
                        data_type=data.get('type'),
                        resource_type=resource_name))
        else:
            # Polymorphic endpoint: the type must be one of the allowed set.
            if data.get('type') not in resource_name:
                raise exceptions.Conflict(
                    "The resource object's type ({data_type}) is not the type that "
                    "constitute the collection represented by the endpoint "
                    "(one of [{resource_types}]).".format(
                        data_type=data.get('type'),
                        resource_types=", ".join(resource_name)))
        if not data.get('id') and request.method in ('PATCH', 'PUT'):
            raise ParseError("The resource identifier object must contain an 'id' member")

    # Construct the return data
    serializer_class = getattr(view, 'serializer_class', None)
    parsed_data = {'id': data.get('id')} if 'id' in data else {}
    # `type` field needs to be allowed in none polymorphic serializers
    if serializer_class is not None:
        if issubclass(serializer_class, serializers.PolymorphicModelSerializer):
            parsed_data['type'] = data.get('type')
    parsed_data.update(self.parse_attributes(data))
    parsed_data.update(self.parse_relationships(data))
    parsed_data.update(self.parse_metadata(result))
    return parsed_data
def filter(self, qs, value):
    """Apply this filter to *qs*, rejecting empty values with a ParseError."""
    value_is_empty = value in django_filters.constants.EMPTY_VALUES
    if value_is_empty:
        raise ParseError("Invalid filter on empty value: {}".format(value))
    # Delegate the actual queryset filtering to the base Filter class.
    return django_filters.Filter.filter(self, qs, value)
for n, k, v in or_filters: if n: q |= ~Q(**{k: v}) else: q |= Q(**{k: v}) args.append(q) queryset = queryset.filter(*args) for n, k, v in chain_filters: if n: q = ~Q(**{k: v}) else: q = Q(**{k: v}) queryset = queryset.filter(q) return queryset.distinct() except (FieldError, FieldDoesNotExist, ValueError), e: raise ParseError(e.args[0]) except ValidationError, e: raise ParseError(e.messages) class OrderByBackend(BaseFilterBackend): ''' Filter to apply ordering based on query string parameters. ''' def filter_queryset(self, request, queryset, view): try: order_by = None for key, value in request.GET.items(): if key in ('order', 'order_by'): if ',' in value: order_by = value.split(',')
def get(self, request, organization):
    """
    List an Organization's Issues
    `````````````````````````````
    Return a list of issues (groups) bound to an organization.  All parameters
    are supplied as query string parameters.

    A default query of ``is:unresolved`` is applied. To return results with
    other statuses send an new query value (i.e. ``?query=`` for all results).

    The ``groupStatsPeriod`` parameter can be used to select the timeline
    stats which should be present. Possible values are: '' (disable),
    '24h', '14d'

    The ``statsPeriod`` parameter can be used to select a date window starting
    from now. Ex. ``14d``.

    The ``start`` and ``end`` parameters can be used to select an absolute
    date period to fetch issues from.

    :qparam string statsPeriod: an optional stat period (can be one of
                                ``"24h"``, ``"14d"``, and ``""``).
    :qparam string groupStatsPeriod: an optional stat period (can be one of
                                ``"24h"``, ``"14d"``, and ``""``).
    :qparam string start: Beginning date. You must also provide ``end``.
    :qparam string end: End date. You must also provide ``start``.
    :qparam bool shortIdLookup: if this is set to true then short IDs are
                                looked up by this function as well.  This can
                                cause the return value of the function to
                                return an event issue of a different project
                                which is why this is an opt-in. Set to `1` to
                                enable.
    :qparam querystring query: an optional Sentry structured search query.  If
                               not provided an implied ``"is:unresolved"`` is
                               assumed.)
    :qparam list expand: an optional list of strings to opt in to additional
                         data. Supports `inbox`
    :qparam list collapse: an optional list of strings to opt out of certain
                           pieces of data. Supports `stats`, `lifetime`,
                           `base`
    :pparam string organization_slug: the slug of the organization the
                                      issues belong to.
    :auth: required
    """
    stats_period = request.GET.get("groupStatsPeriod")
    try:
        start, end = get_date_range_from_params(request.GET)
    except InvalidParams as e:
        raise ParseError(detail=str(e))
    expand = request.GET.getlist("expand", [])
    collapse = request.GET.getlist("collapse", [])
    if stats_period not in (None, "", "24h", "14d", "auto"):
        return Response({"detail": ERR_INVALID_STATS_PERIOD}, status=400)
    stats_period, stats_period_start, stats_period_end = calculate_stats_period(
        stats_period, start, end)
    environments = self.get_environments(request, organization)
    # Pre-bind the serializer options; it is instantiated per response below.
    serializer = functools.partial(
        StreamGroupSerializerSnuba,
        environment_ids=[env.id for env in environments],
        stats_period=stats_period,
        stats_period_start=stats_period_start,
        stats_period_end=stats_period_end,
        expand=expand,
        collapse=collapse,
    )
    projects = self.get_projects(request, organization)
    project_ids = [p.id for p in projects]
    if not projects:
        return Response([])
    if len(projects) > 1 and not features.has("organizations:global-views",
                                              organization,
                                              actor=request.user):
        return Response(
            {
                "detail": "You do not have the multi project stream feature enabled"
            },
            status=400)

    # we ignore date range for both short id and event ids
    query = request.GET.get("query", "").strip()
    if query:
        # check to see if we've got an event ID
        event_id = normalize_event_id(query)
        if event_id:
            # For a direct hit lookup we want to use any passed project ids
            # (we've already checked permissions on these) plus any other
            # projects that the user is a member of. This gives us a better
            # chance of returning the correct result, even if the wrong
            # project is selected.
            direct_hit_projects = set(project_ids) | {
                project.id
                for project in request.access.projects
            }
            groups = list(
                Group.objects.filter_by_event_id(direct_hit_projects, event_id))
            if len(groups) == 1:
                # Exactly one match: flag the response as a direct hit.
                response = Response(
                    serialize(groups, request.user,
                              serializer(matching_event_id=event_id)))
                response["X-Sentry-Direct-Hit"] = "1"
                return response
            if groups:
                return Response(serialize(groups, request.user, serializer()))
        group = get_by_short_id(organization.id,
                                request.GET.get("shortIdLookup"), query)
        if group is not None:
            # check all projects user has access to
            if request.access.has_project_access(group.project):
                response = Response(
                    serialize([group], request.user, serializer()))
                response["X-Sentry-Direct-Hit"] = "1"
                return response

    # If group ids specified, just ignore any query components
    try:
        group_ids = set(map(int, request.GET.getlist("group")))
    except ValueError:
        return Response({"detail": "Group ids must be integers"}, status=400)
    if group_ids:
        groups = list(
            Group.objects.filter(id__in=group_ids,
                                 project_id__in=project_ids))
        if any(g for g in groups
               if not request.access.has_project_access(g.project)):
            raise PermissionDenied
        return Response(serialize(groups, request.user, serializer()))

    try:
        cursor_result, query_kwargs = self._search(
            request,
            organization,
            projects,
            environments,
            {
                "count_hits": True,
                "date_to": end,
                "date_from": start
            },
        )
    except (ValidationError, discover.InvalidSearchQuery) as exc:
        return Response({"detail": str(exc)}, status=400)
    results = list(cursor_result)
    context = serialize(
        results,
        request.user,
        serializer(
            start=start,
            end=end,
            search_filters=query_kwargs["search_filters"]
            if "search_filters" in query_kwargs else None,
            organization_id=organization.id,
        ),
    )

    # HACK: remove auto resolved entries
    # TODO: We should try to integrate this into the search backend, since
    # this can cause us to arbitrarily return fewer results than requested.
    status = [
        search_filter
        for search_filter in query_kwargs.get("search_filters", [])
        if search_filter.key.name == "status"
        and search_filter.operator in EQUALITY_OPERATORS
    ]
    if status and (GroupStatus.UNRESOLVED in status[0].value.raw_value):
        status_labels = {
            QUERY_STATUS_LOOKUP[s]
            for s in status[0].value.raw_value
        }
        context = [
            r for r in context
            if "status" not in r or r["status"] in status_labels
        ]
    response = Response(context)
    self.add_cursor_headers(request, response, cursor_result)
    # TODO(jess): add metrics that are similar to project endpoint here
    return response
def get(self, request, *args, **kwargs):
    """Bulk-import conversation files listed in a manifest file.

    The file-server base URL comes either from the ``member`` shortcut
    (``BCBS``/``FB``) or directly from the ``base_url`` query parameter.
    Each name listed in the ``file_name`` manifest (read from
    ``settings.CONVERSATION_ROOT``) is fetched from the file server and
    stored via ``TextUploadAPI.save_file``.

    Raises:
        ValidationError: for missing/unsupported query parameters or a
            missing manifest file.
        ParseError: when a listed file cannot be retrieved.
    """
    try:
        member = request.query_params['member']
        if member == "BCBS":
            base_url = "https://10.188.101.153:8443/vocioutput/acq_connector/blond01.clarabridge.net/16394744/"
        elif member == "FB":
            base_url = "https://10.188.101.153:8443/vocioutput/acq_connector/10.80.253.172/100049/"
        else:
            raise ValidationError(
                'query parameter member value is not supported')
    except KeyError:
        # No `member` given: fall back to an explicit `base_url`.
        # FIX: the original also had an `except MultiValueDictKeyError`
        # clause, but that exception subclasses KeyError so the clause was
        # unreachable dead code; it has been removed.
        try:
            base_url = request.query_params['base_url']
        except KeyError:
            # FIX: corrected "dervive" typo in the error message.
            raise ValidationError(
                'was not able to derive base_url from base_url or member query parameter'
            )
    try:
        project_id = request.query_params['project_id']
        file_name = request.query_params['file_name']
        with open(path.join(settings.CONVERSATION_ROOT, file_name)) as f:
            file_list = f.readlines()
    except KeyError as ex:
        raise ValidationError('query parameter {} is missing'.format(ex))
    except FileNotFoundError:
        raise ValidationError('file {} does not exist'.format(file_name))
    for remote_name in file_list:
        try:
            # SECURITY NOTE: verify=False disables TLS certificate checks;
            # kept for behavioural parity, but this should be revisited.
            single_file = requests.get(
                (base_url + remote_name).strip(),
                auth=(settings.FILE_SERVICE_USERNAME,
                      settings.FILE_SERVICE_PASSWORD),
                verify=False).text
        except Exception:
            raise ParseError(
                "couldn't retrieve {} from file server".format(base_url +
                                                               remote_name))
        try:
            TextUploadAPI.save_file(
                user=request.user,
                file={
                    "text": single_file,
                    "meta": {
                        "filename": remote_name,
                        "file_no_ext": remote_name.replace('.txt', '')
                    }
                },
                file_format="raw_json",
                project_id=project_id,
            )
        except Exception:
            # Best-effort import: report the failure and continue with the
            # remaining files (matches the original behaviour).
            print(f"Failed uploading {remote_name}")
    return Response(status=status.HTTP_201_CREATED)
def post(self, request):
    """Store the uploaded image as the requesting user's profile picture."""
    payload = request.data
    try:
        # A KeyError here means the request body had no 'file' entry.
        set_profile_picture(request.user, payload['file'])
    except KeyError:
        raise ParseError("'file' field missing.")
    return Response("Uploaded.", status=status.HTTP_201_CREATED)
def pre_delete(self, instance):
    """Block removal of restricted groups through the API."""
    group_is_restricted = instance.group.restricted
    if group_is_restricted:
        raise ParseError('Restricted groups can\'t be unset via the API.')
def parse(self, stream, media_type=None, parser_context=None):
    """Parse an LTI Outcomes XML request body.

    Returns a dict containing ``body_hash`` plus whichever of ``version``,
    ``msgid``, ``req_type``, ``sourced_id`` and ``score`` are present in
    the document (None values are never stored).

    Raises:
        ParseError: on XML syntax errors or an unexpected root element.
    """
    def set_key(data, key, value):
        # do not set None values to the data
        # the XML API returns None if the query finds nothing
        if value is not None:
            data[key] = value

    # the stream can be read only once, but we need to both compute
    # the hash of the data and parse the data as XML
    body_bytes = stream.read()
    # b64encode returns a byte string so it is decoded to a normal string
    body_hash = base64.b64encode(
        hashlib.sha1(body_bytes).digest()).decode('ASCII')
    stream = BytesIO(
        body_bytes)  # a new file-like object with the same data
    try:
        tree = etree.parse(stream)
    except etree.XMLSyntaxError as e:
        logger.warning('XML syntax error in LTI Outcomes request: %s',
                       str(e))
        raise ParseError(str(e))
    root = tree.getroot()
    if root.tag != '{ns}imsx_POXEnvelopeRequest'.format(ns=self.NS):
        logger.warning(
            'Unexpected root element in LTI Outcomes request: %s', root.tag)
        raise ParseError(
            'The XML root element is not "{ns}imsx_POXEnvelopeRequest"'.
            format(ns=self.NS))
    data = {}
    set_key(data, 'body_hash', body_hash)
    set_key(
        data, 'version',
        root.findtext(
            '{ns}imsx_POXHeader/{ns}imsx_POXRequestHeaderInfo/{ns}imsx_version'
            .format(ns=self.NS)))
    set_key(
        data, 'msgid',
        root.findtext(
            '{ns}imsx_POXHeader/{ns}imsx_POXRequestHeaderInfo/{ns}imsx_messageIdentifier'
            .format(ns=self.NS)))
    body_elem = root.find('{ns}imsx_POXBody'.format(ns=self.NS))
    if body_elem is not None and len(body_elem):
        # body element exists and has children
        # only one child is expected
        operation_elem = body_elem[0]
        # expecting a known request type, like readResultRequest
        # (three types given in the constants in this class)
        # remove the namespace prefix from the start of the tag name and
        # the suffix "Request" from the end of the tag name
        req_type = operation_elem.tag[len(self.NS):-7]
        data['req_type'] = req_type
        set_key(
            data, 'sourced_id',
            operation_elem.findtext(
                '{ns}resultRecord/{ns}sourcedGUID/{ns}sourcedId'.format(
                    ns=self.NS)))
        if req_type == self.TYPE_REPLACE:
            # Only replace-result requests carry a score value.
            set_key(
                data, 'score',
                operation_elem.findtext(
                    '{ns}resultRecord/{ns}result/{ns}resultScore/{ns}textString'
                    .format(ns=self.NS)))
    return data
def get(self, request, organization):
    """Return paginated performance-trend results for an organization.

    Computes a date midpoint, builds trend columns for the requested
    trend function, and paginates a discover query over the selected
    fields plus those trend columns.
    """
    if not self.has_feature(organization, request):
        return Response(status=404)
    try:
        params = self.get_snuba_params(request, organization)
    except NoProjects:
        return Response([])
    with sentry_sdk.start_span(op="discover.endpoint",
                               description="trend_dates"):
        # Midpoint of the requested window; trends compare the two halves.
        middle = params["start"] + timedelta(
            seconds=(params["end"] - params["start"]).total_seconds() * 0.5)
        start, middle, end = (
            datetime.strftime(params["start"], DateArg.date_format),
            datetime.strftime(middle, DateArg.date_format),
            datetime.strftime(params["end"], DateArg.date_format),
        )
    trend_type = request.GET.get("trendType", REGRESSION)
    if trend_type not in TREND_TYPES:
        raise ParseError(
            detail=u"{} is not a supported trend type".format(trend_type))
    params["aliases"] = self.get_function_aliases(trend_type)
    trend_function = request.GET.get("trendFunction", "p50()")
    function, columns = parse_function(trend_function)
    trend_columns = self.get_trend_columns(function, columns, start, middle,
                                           end)
    selected_columns = request.GET.getlist("field")[:]
    orderby = self.get_orderby(request)
    query = request.GET.get("query")

    def data_fn(offset, limit):
        # Page-fetch callback used by the offset paginator below.
        return discover.query(
            selected_columns=selected_columns + trend_columns,
            query=query,
            params=params,
            orderby=orderby,
            offset=offset,
            limit=limit,
            referrer="api.trends.get-percentage-change",
            auto_fields=True,
            auto_aggregations=True,
            use_aggregate_conditions=True,
        )

    with self.handle_query_errors():
        return self.paginate(
            request=request,
            paginator=GenericOffsetPaginator(data_fn=data_fn),
            on_results=self.build_result_handler(request, organization,
                                                 params, trend_function,
                                                 selected_columns, orderby,
                                                 query),
            default_per_page=5,
            max_per_page=5,
        )
def get_user(self):
    """Look up the UserProfile addressed by the 'pk' URL kwarg."""
    profile_pk = self.kwargs.get('pk')
    try:
        return UserProfile.objects.get(pk=profile_pk)
    except UserProfile.DoesNotExist:
        raise ParseError('User must exist.')
def to_internal_value(self, data):
    """Validate that the incoming payload is a list and pass it through.

    Raises:
        ParseError: if *data* is not a list.
    """
    # FIX: use isinstance rather than an exact type() identity check —
    # the idiomatic form, and it also accepts list subclasses, which
    # behave like lists for downstream consumers.
    if not isinstance(data, list):
        raise ParseError("expected a list of data")
    return data
def validate_sparse_fieldsets(self):
    """Reject sparse-fieldset resource types that were not included."""
    requested = self.only_fields
    if not requested:
        return
    unknown = set(requested) - self.included_types
    if unknown:
        raise ParseError("Invalid resource type(s): {}".format(unknown))
def validate_param_exist(param, type):
    """Log and raise ParseError when *param* is missing or falsy.

    NOTE(review): the second parameter shadows the `type` builtin; it is
    kept as-is so keyword callers are not broken.
    """
    if param:
        return
    msg = {"error": "Missing parameter '{}' in body.".format(type)}
    log.info(msg)
    raise ParseError(msg)
def filter_queryset(self, request, queryset, view):
    """Apply query-parameter driven filtering to *queryset*.

    Supports `not__`, `or__`, `chain__` and `__int` key decorations,
    `__search` across related objects, RBAC `role_level` filtering, and
    several legacy v1 API field translations.

    Raises:
        ParseError: for invalid fields/values (wrapping the underlying
            Django errors).
    """
    try:
        # Apply filters specified via query_params. Each entry in the lists
        # below is (negate, field, value).
        and_filters = []
        or_filters = []
        chain_filters = []
        role_filters = []
        search_filters = []
        for key, values in request.query_params.lists():
            if key in self.RESERVED_NAMES:
                continue

            # HACK: Make job event filtering by host name mostly work even
            # when not capturing job event hosts M2M.
            if queryset.model._meta.object_name == 'JobEvent' and key.startswith(
                    'hosts__name'):
                key = key.replace('hosts__name', 'or__host__name')
                or_filters.append((False, 'host__name__isnull', True))

            # Custom __int filter suffix (internal use only).
            q_int = False
            if key.endswith('__int'):
                key = key[:-5]
                q_int = True

            # RBAC filtering
            if key == 'role_level':
                role_filters.append(values[0])
                continue

            # Search across related objects.
            if key.endswith('__search'):
                for value in values:
                    for search_term in force_text(value).replace(
                            ',', ' ').split():
                        search_value, new_keys = self.value_to_python(
                            queryset.model, key, search_term)
                        assert isinstance(new_keys, list)
                        for new_key in new_keys:
                            search_filters.append((new_key, search_value))
                continue

            # Custom chain__ and or__ filters, mutually exclusive (both can
            # precede not__).
            q_chain = False
            q_or = False
            if key.startswith('chain__'):
                key = key[7:]
                q_chain = True
            elif key.startswith('or__'):
                key = key[4:]
                q_or = True

            # Custom not__ filter prefix.
            q_not = False
            if key.startswith('not__'):
                key = key[5:]
                q_not = True

            # Make legacy v1 Job/Template fields work for backwards compatability
            # TODO: remove after API v1 deprecation period
            if queryset.model._meta.object_name in (
                    'JobTemplate',
                    'Job') and key in ('credential', 'vault_credential',
                                       'cloud_credential',
                                       'network_credential'):
                key = 'credentials'

            # Make legacy v1 Credential fields work for backwards compatability
            # TODO: remove after API v1 deprecation period
            #
            # convert v1 `Credential.kind` queries to `Credential.credential_type__pk`
            if queryset.model._meta.object_name == 'Credential' and key == 'kind':
                key = key.replace('kind', 'credential_type')
                if 'ssh' in values:
                    # In 3.2, SSH and Vault became separate credential types, but in the v1 API,
                    # they're both still "kind=ssh"
                    # under the hood, convert `/api/v1/credentials/?kind=ssh` to
                    # `/api/v1/credentials/?or__credential_type=<ssh_pk>&or__credential_type=<vault_pk>`
                    values = set(values)
                    values.add('vault')
                    values = list(values)
                    q_or = True
                for i, kind in enumerate(values):
                    if kind == 'vault':
                        type_ = CredentialType.objects.get(kind=kind)
                    else:
                        type_ = CredentialType.from_v1_kind(kind)
                    if type_ is None:
                        raise ParseError(
                            _('cannot filter on kind %s') % kind)
                    values[i] = type_.pk

            # Convert value(s) to python and add to the appropriate list.
            for value in values:
                if q_int:
                    value = int(value)
                value, new_key = self.value_to_python(
                    queryset.model, key, value)
                if q_chain:
                    chain_filters.append((q_not, new_key, value))
                elif q_or:
                    or_filters.append((q_not, new_key, value))
                else:
                    and_filters.append((q_not, new_key, value))

        # Now build Q objects for database query filter.
        if and_filters or or_filters or chain_filters or role_filters or search_filters:
            args = []
            for n, k, v in and_filters:
                if n:
                    args.append(~Q(**{k: v}))
                else:
                    args.append(Q(**{k: v}))
            for role_name in role_filters:
                args.append(
                    Q(pk__in=RoleAncestorEntry.objects.filter(
                        ancestor__in=request.user.roles.all(),
                        content_type_id=ContentType.objects.get_for_model(
                            queryset.model).id,
                        role_field=role_name).values_list(
                            'object_id').distinct()))
            if or_filters:
                q = Q()
                for n, k, v in or_filters:
                    if n:
                        q |= ~Q(**{k: v})
                    else:
                        q |= Q(**{k: v})
                args.append(q)
            if search_filters:
                q = Q()
                for k, v in search_filters:
                    q |= Q(**{k: v})
                args.append(q)
            # Each chain filter is applied as its own .filter() call so that
            # related-object conditions are not merged into a single JOIN.
            for n, k, v in chain_filters:
                if n:
                    q = ~Q(**{k: v})
                else:
                    q = Q(**{k: v})
                queryset = queryset.filter(q)
            queryset = queryset.filter(*args).distinct()
        return queryset
    except (FieldError, FieldDoesNotExist, ValueError, TypeError) as e:
        raise ParseError(e.args[0])
    except ValidationError as e:
        raise ParseError(json.dumps(e.messages, ensure_ascii=False))
def get_project(self, request, organization):
    """Return the single project for this request; key transactions do not
    support multi-project queries."""
    projects = self.get_projects(request, organization)
    if len(projects) != 1:
        raise ParseError("Only 1 project per Key Transaction")
    (project,) = projects
    return project
def get_data_and_form(kwargs):
    """Return the (data_id, format) pair extracted from view kwargs.

    Raises ParseError when the data id is not a string of digits.
    """
    data_id = str(kwargs.get('dataid'))
    if data_id.isdigit():
        return (data_id, kwargs.get('format'))
    raise ParseError(_(u"Data ID should be an integer"))
def post(self, request, pk):
    """
    Create a generated document

    Renders the template to HTML, converts it to a (optionally signed)
    PDF, uploads it to S3 and records a GeneratedCaseDocument plus an
    audit-trail entry inside one transaction.
    """
    licence = None
    try:
        document = get_generated_document_data(request.data, pk)
    except AttributeError as e:
        return JsonResponse(data={"errors": [str(e)]},
                            status=status.HTTP_400_BAD_REQUEST)
    try:
        pdf = html_to_pdf(document.document_html,
                          document.template.layout.filename)
    except Exception:  # noqa
        return JsonResponse(
            {"errors": [strings.Cases.GeneratedDocuments.PDF_ERROR]},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR
        )
    if document.template.include_digital_signature:
        pdf = sign_pdf(pdf)
    # Approve/proviso decisions must be tied to the case's draft licence.
    if request.data.get("advice_type") in [
            AdviceType.APPROVE,
            AdviceType.PROVISO,
    ]:
        try:
            licence = Licence.objects.get_draft_licence(pk)
        except Licence.DoesNotExist:
            raise ParseError({"non_field_errors": [strings.Cases.GeneratedDocuments.LICENCE_ERROR]})
    s3_key = s3_operations.generate_s3_key(document.template.name, "pdf")
    # base the document name on the template name and a portion of the UUID generated for the s3 key
    document_name = f"{s3_key[:len(document.template.name) + 6]}.pdf"
    visible_to_exporter = str_to_bool(request.data.get("visible_to_exporter"))
    # If the template is not visible to exporter this supersedes what is given for the document
    # Decision documents are also hidden until finalised (see FinaliseView)
    if not document.template.visible_to_exporter or request.data.get("advice_type"):
        visible_to_exporter = False
    try:
        with transaction.atomic():
            # Delete any pre-existing decision document if the documents have not been finalised
            # i.e. They are not visible to the exporter
            GeneratedCaseDocument.objects.filter(
                case=document.case,
                advice_type=request.data.get("advice_type"),
                visible_to_exporter=False
            ).delete()
            generated_doc = GeneratedCaseDocument.objects.create(
                name=document_name,
                user=request.user.govuser,
                s3_key=s3_key,
                virus_scanned_at=timezone.now(),
                safe=True,
                type=CaseDocumentState.GENERATED,
                case=document.case,
                template=document.template,
                text=document.text,
                visible_to_exporter=visible_to_exporter,
                advice_type=request.data.get("advice_type"),
                licence=licence,
            )
            audit_trail_service.create(
                actor=request.user.govuser,
                verb=AuditType.GENERATE_CASE_DOCUMENT,
                action_object=generated_doc,
                target=document.case,
                payload={"file_name": document_name, "template": document.template.name},
            )
            # Upload last so a failed upload rolls back the DB records above.
            s3_operations.upload_bytes_file(raw_file=pdf, s3_key=s3_key)
    except Exception:  # noqa
        return JsonResponse(
            {"errors": [strings.Cases.GeneratedDocuments.UPLOAD_ERROR]},
            status=status.HTTP_500_INTERNAL_SERVER_ERROR,
        )
    return JsonResponse(data={"generated_document": str(generated_doc.id)},
                        status=status.HTTP_201_CREATED)
def get(self, request, organization):
    """Return paginated performance-trend results with per-event stats.

    Splits the requested date range in half, queries trend columns for
    the chosen trend function over both halves, and attaches top-event
    timeseries stats to the paginated results.
    """
    if not self.has_feature(organization, request):
        return Response(status=404)
    with sentry_sdk.start_span(op="discover.endpoint",
                               description="filter_params") as span:
        span.set_tag("organization", organization)
        try:
            params = self.get_filter_params(request, organization)
        except NoProjects:
            return Response([])
        params = self.quantize_date_params(request, params)
        has_global_views = features.has("organizations:global-views",
                                        organization,
                                        actor=request.user)
        if not has_global_views and len(params.get("project_id", [])) > 1:
            raise ParseError(
                detail="You cannot view events from multiple projects.")
        # Midpoint of the window; trends compare the two halves.
        middle = params["start"] + timedelta(
            seconds=(params["end"] - params["start"]).total_seconds() * 0.5)
        start, middle, end = (
            datetime.strftime(params["start"], DateArg.date_format),
            datetime.strftime(middle, DateArg.date_format),
            datetime.strftime(params["end"], DateArg.date_format),
        )
    trend_function = request.GET.get("trendFunction", "p50()")
    function, columns = parse_function(trend_function)
    trend_column = self.trend_columns.get(function)
    if trend_column is None:
        raise ParseError(detail=u"{} is not a supported trend function".
                         format(trend_function))
    count_column = self.trend_columns.get("count_range")
    percentage_column = self.trend_columns["percentage"]
    selected_columns = request.GET.getlist("field")[:]
    query = request.GET.get("query")
    orderby = self.get_orderby(request)

    def data_fn(offset, limit):
        # Page-fetch callback: selected fields plus the derived trend
        # columns (range aggregates for each half, deltas, percentages).
        return discover.query(
            selected_columns=selected_columns + [
                trend_column["format"].format(
                    *columns, start=start, end=middle, index="1"),
                trend_column["format"].format(
                    *columns, start=middle, end=end, index="2"),
                percentage_column["format"].format(
                    alias=trend_column["alias"]),
                "minus({alias}2,{alias}1)".format(
                    alias=trend_column["alias"]),
                count_column["format"].format(
                    start=start, end=middle, index="1"),
                count_column["format"].format(
                    start=middle, end=end, index="2"),
                percentage_column["format"].format(
                    alias=count_column["alias"]),
                "absolute_correlation()",
            ],
            query=query,
            params=params,
            orderby=orderby,
            offset=offset,
            limit=limit,
            referrer="api.trends.get-percentage-change",
            auto_fields=True,
            use_aggregate_conditions=True,
        )

    def on_results(events_results):
        def get_event_stats(query_columns, query, params, rollup,
                            reference_event):
            return discover.top_events_timeseries(
                query_columns,
                selected_columns,
                query,
                params,
                orderby,
                rollup,
                min(5, len(events_results["data"])),
                organization,
                top_events=events_results,
                referrer="api.trends.get-event-stats",
            )

        # Only fetch timeseries stats when the page actually has events.
        stats_results = (self.get_event_stats_data(
            request,
            organization,
            get_event_stats,
            top_events=True,
            query_column=trend_function,
        ) if len(events_results["data"]) > 0 else {})
        return {
            "events":
            self.handle_results_with_meta(request, organization,
                                          params["project_id"],
                                          events_results),
            "stats":
            stats_results,
        }

    with self.handle_query_errors():
        return self.paginate(
            request=request,
            paginator=GenericOffsetPaginator(data_fn=data_fn),
            on_results=on_results,
            default_per_page=5,
            max_per_page=5,
        )
def test_handling_snuba_errors(self, mock_query):
    """A ParseError raised by the query layer must surface as HTTP 400."""
    mock_query.side_effect = ParseError("test")
    with self.feature(self.feature_list):
        resp = self.client.get(self.url, format="json")
        assert resp.status_code == 400, resp.content
def set_apps(self, obj, apps):
    """Attach *apps* to *obj*, translating missing webapps into ParseError."""
    if not apps:
        return
    try:
        obj.set_apps(apps)
    except Webapp.DoesNotExist:
        raise ParseError(detail=self.exceptions['doesnt_exist'])
def to_internal_value(self, data):
    """Resolve a city name to its City instance.

    Raises:
        ParseError: when no City with that name exists.
    """
    try:
        return City.objects.get(name=data)
    # FIX: the original used a bare `except:`, which also swallowed
    # programming errors (and even KeyboardInterrupt), masking bugs as
    # "City doesn't exist". Catch only the lookup-miss case.
    except City.DoesNotExist:
        raise ParseError('City doesn\'t exist')
def create(self, request, *args, **kwargs):
    """
    Create an image instance.

    Validates the uploaded build file and metadata, persists the image
    record, saves the upload to disk and kicks off an ImageBuilder to
    build/push the real image into the private registry.

    Raises:
        ParseError: when no build file was uploaded.
        ValidationError: when an image with the same name/version already
            exists in the project, or serializer validation fails.
    """
    logger.info("user %s will create a new image." % request.user.username)
    if not request.FILES.get('file'):
        raise ParseError(detail="There is no image build file.")
    assert 'pid' in self.kwargs
    pid = self.kwargs['pid']
    data = request.data
    data['project'] = pid
    data['user'] = request.user.id
    logger.debug(data)
    # create image metadata
    serializer = self.get_serializer(data=data)
    serializer.is_valid(raise_exception=True)
    images = Image.objects.filter(project__id=pid,
                                  name=data['name'],
                                  version=data['version'])
    if images:
        # FIX: the original formatted this message with undefined locals
        # `name` and `version`, raising NameError instead of the intended
        # ValidationError.
        raise ValidationError(
            detail="Already has an image called {}:{}.".format(
                data['name'], data['version']))
    self.perform_create(serializer)
    headers = self.get_success_headers(serializer.data)
    response = Response(serializer.data,
                        status=status.HTTP_201_CREATED,
                        headers=headers)
    image = serializer.data
    is_image = is_image_or_dockerfile(request.data.get('is_image', None))
    dockerfile = None
    old_image_name = None
    old_image_version = None
    if is_image == 0:
        # Building from a Dockerfile inside the uploaded archive.
        dockerfile = request.data.get('dockerfile', 'Dockerfile')
    if is_image == 1:
        # Re-tagging an existing image.
        old_image_name = request.data.get('old_image_name', image['name'])
        old_image_version = request.data.get('old_image_version',
                                             image['version'])
    filename = get_upload_image_filename(image, request.user)
    save_upload_file_to_disk(request.FILES['file'], filename)
    # create a true image instance, and upload into private registry
    builder = ImageBuilder(build_file=filename,
                           is_image=is_image,
                           dockerfile=dockerfile,
                           image_id=image['id'],
                           old_image_name=old_image_name,
                           old_image_version=old_image_version)
    builder.create_image()
    return response
def get(self, request, organization):
    """
    List an Organization's Releases
    ```````````````````````````````
    Return a list of releases for a given organization.

    :pparam string organization_slug: the organization short name
    :qparam string query: this parameter can be used to create a
                          "starts with" filter for the version.
    """
    query = request.GET.get("query")
    with_health = request.GET.get("health") == "1"
    flatten = request.GET.get("flatten") == "1"
    sort = request.GET.get("sort") or "date"
    summary_stats_period = request.GET.get("summaryStatsPeriod") or "14d"
    health_stats_period = request.GET.get("healthStatsPeriod") or (
        "24h" if with_health else "")
    if summary_stats_period not in STATS_PERIODS:
        raise ParseError(detail=get_stats_period_detail(
            "summaryStatsPeriod", STATS_PERIODS))
    if health_stats_period and health_stats_period not in STATS_PERIODS:
        raise ParseError(detail=get_stats_period_detail(
            "healthStatsPeriod", STATS_PERIODS))
    paginator_cls = OffsetPaginator
    paginator_kwargs = {}
    try:
        filter_params = self.get_filter_params(request,
                                               organization,
                                               date_filter_optional=True)
    except NoProjects:
        return Response([])
    except OrganizationEventsError as e:
        return Response({"detail": six.text_type(e)}, status=400)

    # This should get us all the projects into postgres that have received
    # health data in the last 24 hours. If health data is not requested
    # we don't upsert releases.
    if with_health:
        debounce_update_release_health_data(organization,
                                            filter_params["project_id"])
    queryset = Release.objects.filter(
        organization=organization).select_related("owner")
    if "environment" in filter_params:
        queryset = queryset.filter(
            releaseprojectenvironment__environment__name__in=filter_params[
                "environment"],
            releaseprojectenvironment__project_id__in=filter_params[
                "project_id"],
        )
    if query:
        queryset = queryset.filter(version__istartswith=query)

    select_extra = {}
    sort_query = None
    if flatten:
        # Flattened results carry the project id so rows can be keyed
        # per (project, version) by the merging paginator below.
        select_extra[
            "_for_project_id"] = "sentry_release_project.project_id"
    else:
        queryset = queryset.distinct()
    if sort == "date":
        sort_query = "COALESCE(sentry_release.date_released, sentry_release.date_added)"
    elif sort in ("crash_free_sessions", "crash_free_users", "sessions",
                  "users"):
        if not flatten:
            return Response(
                {
                    "detail":
                    "sorting by crash statistics requires flattening (flatten=1)"
                },
                status=400,
            )
        # Stability sorting merges DB rows with externally-ranked
        # (project, version) pairs from the health data store.
        paginator_cls = MergingOffsetPaginator
        paginator_kwargs.update(
            data_load_func=lambda offset, limit:
            get_project_releases_by_stability(
                project_ids=filter_params["project_id"],
                environments=filter_params.get("environment"),
                scope=sort,
                offset=offset,
                stats_period=summary_stats_period,
                limit=limit,
            ),
            apply_to_queryset=lambda queryset, rows: queryset.filter(
                projects__id__in=list(x[0] for x in rows),
                version__in=list(x[1] for x in rows)),
            key_from_model=lambda x: (x._for_project_id, x.version),
        )
    else:
        return Response({"detail": "invalid sort"}, status=400)
    if sort_query is not None:
        queryset = queryset.filter(
            projects__id__in=filter_params["project_id"])
        select_extra["sort"] = sort_query
        paginator_kwargs["order_by"] = "-sort"
    queryset = queryset.extra(select=select_extra)
    if filter_params["start"] and filter_params["end"]:
        queryset = queryset.extra(
            where=[
                "COALESCE(sentry_release.date_released, sentry_release.date_added) BETWEEN %s and %s"
            ],
            params=[filter_params["start"], filter_params["end"]],
        )
    return self.paginate(request=request,
                         queryset=queryset,
                         paginator_cls=paginator_cls,
                         on_results=lambda x: serialize(
                             x,
                             request.user,
                             with_health_data=with_health,
                             health_stats_period=health_stats_period,
                             summary_stats_period=summary_stats_period,
                         ),
                         **paginator_kwargs)
def get(self, request, organization):
    """
    List an Organization's Releases
    ```````````````````````````````
    Return a list of releases for a given organization.

    :pparam string organization_slug: the organization short name
    :qparam string query: this parameter can be used to create a
                          "starts with" filter for the version.
    """
    # --- Query-string options ------------------------------------------------
    query = request.GET.get("query")
    with_health = request.GET.get("health") == "1"
    status_filter = request.GET.get("status", "open")
    flatten = request.GET.get("flatten") == "1"
    sort = request.GET.get("sort") or "date"
    health_stat = request.GET.get("healthStat") or "sessions"
    summary_stats_period = request.GET.get("summaryStatsPeriod") or "14d"
    # Health stats default to a 24h window only when health data was requested.
    health_stats_period = request.GET.get("healthStatsPeriod") or (
        "24h" if with_health else "")
    if summary_stats_period not in STATS_PERIODS:
        raise ParseError(detail=get_stats_period_detail(
            "summaryStatsPeriod", STATS_PERIODS))
    if health_stats_period and health_stats_period not in STATS_PERIODS:
        raise ParseError(detail=get_stats_period_detail(
            "healthStatsPeriod", STATS_PERIODS))
    if health_stat not in ("sessions", "users"):
        raise ParseError(detail="invalid healthStat")

    paginator_cls = OffsetPaginator
    paginator_kwargs = {}

    try:
        filter_params = self.get_filter_params(request, organization,
                                               date_filter_optional=True)
    except NoProjects:
        # No accessible projects: return an empty list rather than an error.
        return Response([])

    # This should get us all the projects into postgres that have received
    # health data in the last 24 hours.
    debounce_update_release_health_data(organization,
                                        filter_params["project_id"])

    queryset = Release.objects.filter(organization=organization)

    # --- Status filter -------------------------------------------------------
    if status_filter:
        try:
            status_int = ReleaseStatus.from_string(status_filter)
        except ValueError:
            raise ParseError(detail="invalid value for status")
        if status_int == ReleaseStatus.OPEN:
            # Rows with a NULL status are treated as "open" as well.
            queryset = queryset.filter(
                Q(status=status_int) | Q(status=None))
        else:
            queryset = queryset.filter(status=status_int)

    # Expose date_added under the alias "date" so it can be sorted on below.
    queryset = queryset.select_related("owner").annotate(
        date=F("date_added"))

    queryset = add_environment_to_queryset(queryset, filter_params)

    # --- Version search ------------------------------------------------------
    if query:
        query_q = Q(version__icontains=query)
        suffix_match = _release_suffix.match(query)
        if suffix_match is not None:
            # Also match the "<package>+<version>" form of the same query.
            query_q |= Q(version__icontains="%s+%s" % suffix_match.groups())
        queryset = queryset.filter(query_q)

    select_extra = {}
    queryset = queryset.distinct()
    if flatten:
        # One row per (release, project) pair; MergingOffsetPaginator's
        # key_from_model below reads this extra column.
        select_extra[
            "_for_project_id"] = "sentry_release_project.project_id"

    # --- Sorting / pagination strategy --------------------------------------
    if sort == "date":
        queryset = queryset.filter(
            projects__id__in=filter_params["project_id"]).order_by("-date")
        paginator_kwargs["order_by"] = "-date"
    elif sort in (
            "crash_free_sessions",
            "crash_free_users",
            "sessions",
            "users",
            "sessions_24h",
            "users_24h",
    ):
        if not flatten:
            return Response(
                {
                    "detail":
                    "sorting by crash statistics requires flattening (flatten=1)"
                },
                status=400,
            )
        # Stability-based sorts are driven by the external health store:
        # rows are fetched there first and the queryset is narrowed to match.
        paginator_cls = MergingOffsetPaginator
        paginator_kwargs.update(
            data_load_func=lambda offset, limit: get_project_releases_by_stability(
                project_ids=filter_params["project_id"],
                environments=filter_params.get("environment"),
                scope=sort,
                offset=offset,
                stats_period=summary_stats_period,
                limit=limit,
            ),
            apply_to_queryset=lambda queryset, rows: queryset.filter(
                projects__id__in=list(x[0] for x in rows),
                version__in=list(x[1] for x in rows)),
            key_from_model=lambda x: (x._for_project_id, x.version),
        )
    else:
        return Response({"detail": "invalid sort"}, status=400)

    queryset = queryset.extra(select=select_extra)
    queryset = add_date_filter_to_queryset(queryset, filter_params)

    return self.paginate(
        request=request,
        queryset=queryset,
        paginator_cls=paginator_cls,
        on_results=lambda x: serialize(
            x,
            request.user,
            with_health_data=with_health,
            health_stat=health_stat,
            health_stats_period=health_stats_period,
            summary_stats_period=summary_stats_period,
            environments=filter_params.get("environment") or None,
        ),
        **paginator_kwargs,
    )
def to_internal_value(self, data):
    """Resolve a primary-key value into the matching Question instance.

    Raises ParseError with a user-facing message when no Question with
    that primary key exists.
    """
    try:
        return Question.objects.get(pk=data)
    except Question.DoesNotExist:
        raise ParseError({"error": "질문이 존재하지 않습니다."})
def create(self, validated_data):
    """Build preview data for an app upgrade; the target release does not
    exist yet at this point.

    Returns a dict with the rendered manifest (``content``), helm notes,
    a diff against the currently deployed content, a flag telling whether
    the chart version changed, and the old content itself.
    """
    instance = App.objects.get(id=self.app_id)
    check_cluster_perm(user=self.context["request"].user,
                       project_id=instance.project_id,
                       cluster_id=instance.cluster_id,
                       request=self.context["request"])

    # Tracks whether values.yaml in the chart changed, used to notify the user.
    chart_version_changed = False

    # prepare parameters
    parameters = merge_rancher_answers(validated_data["get_answers"],
                                       validated_data["get_customs"])

    # NOTE(review): "upgrade_verion" is misspelled but must match the
    # serializer field name declared elsewhere — confirm before renaming.
    chart_version_id = validated_data["upgrade_verion"]
    chart_version_id = int(chart_version_id)
    if chart_version_id == KEEP_TEMPLATE_UNCHANGED:
        # Keep the files from the currently deployed chart snapshot.
        files = instance.release.chartVersionSnapshot.files
    else:
        chart_version_changed = True
        chart_version = ChartVersion.objects.get(id=chart_version_id)
        files = chart_version.files

    valuefile = get_valuefile_with_bcs_variable_injected(
        access_token=self.context["request"].user.token.access_token,
        project_id=instance.project_id,
        namespace_id=instance.namespace_id,
        valuefile=validated_data["valuefile"])

    # Render the chart templates with helm; surface helm failures as a 400.
    client = KubeHelmClient(helm_bin=settings.HELM_BIN)
    try:
        content, notes = client.template(
            files=files,
            namespace=instance.namespace,
            name=instance.name,
            parameters=parameters,
            valuefile=valuefile,
        )
    except helm_exceptions.HelmBaseException as e:
        raise ParseError(str(e))

    # inject bcs info
    now = datetime.datetime.now()
    content = bcs_info_injector.inject_bcs_info(
        access_token=self.access_token,
        project_id=instance.project_id,
        cluster_id=instance.cluster_id,
        namespace_id=instance.namespace_id,
        namespace=instance.namespace,
        creator=instance.creator,
        updator=self.context["request"].user.username,
        created_at=instance.created,
        updated_at=now,
        resources=content,
        # Version comes from the deployed snapshot, not the selected chart.
        version=instance.release.chartVersionSnapshot.version,
    )

    # compute diff
    old_content = instance.release.content
    if not old_content:
        # No stored content yet: re-render the app to obtain a baseline.
        old_content, _ = instance.render_app(
            username=self.context["request"].user.username,
            access_token=self.access_token)
    difference = simple_diff(old_content, content, instance.namespace)

    return {
        "content": preview_parse(content, instance.namespace),
        "notes": notes,
        "difference": difference,
        "chart_version_changed": chart_version_changed,
        "old_content": old_content
    }
def get_field_from_lookup(self, model, lookup):
    """Walk a ``__``-separated ORM lookup, validating each hop.

    Rejects lookups that touch password fields, fields marked
    ``__prevent_search__``, or that traverse the same field twice (which
    could create unbounded JOINs). Returns ``(field, new_lookup)`` where
    ``field`` is the last field traversed and ``new_lookup`` is the
    possibly-rewritten lookup string.
    """
    field = None
    parts = lookup.split('__')
    # Ensure the lookup ends with an explicit lookup type so the loop below
    # only iterates over field names (parts[:-1]).
    if parts and parts[-1] not in self.SUPPORTED_LOOKUPS:
        parts.append('exact')
    # FIXME: Could build up a list of models used across relationships, use
    # those lookups combined with request.user.get_queryset(Model) to make
    # sure user cannot query using objects he could not view.
    new_parts = []

    # Store of all the fields used to detect repeats
    field_set = set([])

    for name in parts[:-1]:
        # HACK: Make project and inventory source filtering by old field
        # names work for backwards compatibility.
        if model._meta.object_name in ('Project', 'InventorySource'):
            name = {
                'current_update': 'current_job',
                'last_update': 'last_job',
                'last_update_failed': 'last_job_failed',
                'last_updated': 'last_job_run',
            }.get(name, name)

        if name == 'type' and 'polymorphic_ctype' in get_all_field_names(
                model):
            # Map the friendly "type" name onto the polymorphic ctype model.
            name = 'polymorphic_ctype'
            new_parts.append('polymorphic_ctype__model')
        else:
            new_parts.append(name)

        if name in getattr(model, 'PASSWORD_FIELDS', ()):
            raise PermissionDenied(
                _('Filtering on password fields is not allowed.'))
        elif name == 'pk':
            field = model._meta.pk
        else:
            # Allow underscore-less aliases of reverse-relation accessors
            # (e.g. "foo_bar" resolving to the "foobar" relation).
            name_alt = name.replace("_", "")
            if name_alt in model._meta.fields_map.keys():
                field = model._meta.fields_map[name_alt]
                new_parts.pop()
                new_parts.append(name_alt)
            else:
                field = model._meta.get_field(name)
            if isinstance(field, ForeignObjectRel) and getattr(
                    field.field, '__prevent_search__', False):
                raise PermissionDenied(
                    _('Filtering on %s is not allowed.' % name))
            elif getattr(field, '__prevent_search__', False):
                raise PermissionDenied(
                    _('Filtering on %s is not allowed.' % name))

        if field in field_set:
            # Field traversed twice, could create infinite JOINs, DoSing Tower
            raise ParseError(
                _('Loops not allowed in filters, detected on field {}.').
                format(field.name))
        field_set.add(field)
        # Follow the relation for the next hop; plain fields fall back to
        # their own model.
        model = getattr(field, 'related_model', None) or field.model

    if parts:
        new_parts.append(parts[-1])
    new_lookup = '__'.join(new_parts)
    return field, new_lookup
def _active_annotation_data(image, request):
    # Shared serializer for every response branch of update_annotation:
    # all active-type annotations of the image, ordered by type name.
    serializer = AnnotationSerializer(
        image.annotations.filter(
            annotation_type__active=True).select_related().order_by(
                'annotation_type__name'),
        context={
            'request': request,
        },
        many=True)
    return serializer.data


def update_annotation(request) -> Response:
    """Update an existing annotation from request data.

    Validates the payload, checks edit permission on the image set and the
    vector against the annotation type, deletes the annotation if a similar
    one already exists, otherwise saves the changes and auto-verifies them
    for the editing user.

    Fixes over the previous version: the redundant second assignment of
    ``annotation.annotation_type`` after ``save()`` is removed, the final
    serializer queryset no longer applies the same active-type filter twice,
    and the triplicated serializer construction is factored into
    ``_active_annotation_data``.
    """
    try:
        annotation_id = int(request.data['annotation_id'])
        image_id = int(request.data['image_id'])
        annotation_type_id = int(request.data['annotation_type_id'])
        vector = request.data['vector']
        blurred = request.data['blurred']
        concealed = request.data['concealed']
    except (KeyError, TypeError, ValueError):
        # Missing or malformed payload fields -> 400.
        raise ParseError

    annotation = get_object_or_404(Annotation, pk=annotation_id)
    annotation_type = get_object_or_404(AnnotationType, pk=annotation_type_id)

    # The supplied image id must belong to the annotation being edited.
    if annotation.image_id != image_id:
        raise ParseError('the image id does not match the annotation id.')

    if not annotation.image.image_set.has_perm('edit_annotation',
                                               request.user):
        return Response(
            {
                'detail':
                'permission for updating annotations in this image set missing.',
            },
            status=HTTP_403_FORBIDDEN)

    if not annotation_type.validate_vector(vector):
        return Response(
            {
                'annotations': _active_annotation_data(annotation.image,
                                                       request),
                'detail': 'the vector is invalid.',
            },
            status=HTTP_400_BAD_REQUEST)

    if Annotation.similar_annotations(vector,
                                      annotation.image,
                                      annotation_type,
                                      exclude={annotation.id}):
        # A near-duplicate already exists; drop this one instead of updating.
        annotation.delete()
        return Response({
            'annotations': _active_annotation_data(annotation.image, request),
            'detail': 'similar annotation exists.',
        })

    with transaction.atomic():
        annotation.annotation_type = annotation_type
        annotation.vector = vector
        annotation._concealed = concealed
        annotation._blurred = blurred
        annotation.last_editor = request.user
        annotation.save()
        # Automatically verify for owner
        annotation.verify(request.user, True)

    return Response({
        'annotations': _active_annotation_data(annotation.image, request),
    },
                    status=HTTP_200_OK)
def check_valid_date(self, cron_time):
    """Validate that *cron_time* matches the configured date format.

    The format string is read from ``settings.CELERY_BUSINESS_PARAMS``;
    any parsing/conversion failure raises ParseError built from this
    instance's ``invalid_date_warning`` template.
    """
    fmt = settings.CELERY_BUSINESS_PARAMS.get('date_format')
    try:
        parsed = time.strptime(cron_time, fmt)
        time.mktime(parsed)
    except Exception:
        raise ParseError(self.invalid_date_warning.format(cron_time))