def merge(cls, to_release, from_releases):
    # The following models reference release:
    # ReleaseCommit.release
    # ReleaseEnvironment.release_id
    # ReleaseProject.release
    # GroupRelease.release_id
    # GroupResolution.release
    # Group.first_release
    # ReleaseFile.release
    from sentry.models import (
        Group,
        GroupRelease,
        GroupResolution,
        ReleaseCommit,
        ReleaseEnvironment,
        ReleaseFile,
        ReleaseProject,
        ReleaseProjectEnvironment,
    )

    model_list = (
        ReleaseCommit,
        ReleaseEnvironment,
        ReleaseFile,
        ReleaseProject,
        ReleaseProjectEnvironment,
        GroupRelease,
        GroupResolution,
    )
    for release in from_releases:
        for model in model_list:
            if hasattr(model, "release"):
                update_kwargs = {"release": to_release}
            else:
                update_kwargs = {"release_id": to_release.id}
            try:
                with atomic_transaction(using=router.db_for_write(model)):
                    model.objects.filter(release_id=release.id).update(**update_kwargs)
            except IntegrityError:
                for item in model.objects.filter(release_id=release.id):
                    try:
                        with atomic_transaction(using=router.db_for_write(model)):
                            model.objects.filter(id=item.id).update(**update_kwargs)
                    except IntegrityError:
                        item.delete()

        Group.objects.filter(first_release=release).update(first_release=to_release)
        release.delete()

def save_avatar(cls, relation, type, avatar=None, filename=None, color=None):
    from sentry.models import File

    if avatar:
        with atomic_transaction(using=router.db_for_write(File)):
            photo = File.objects.create(name=filename, type=cls.FILE_TYPE)
            # XXX: Avatar may come in as a string instance in python2
            # if it's not wrapped in BytesIO.
            if isinstance(avatar, str):
                avatar = BytesIO(force_bytes(avatar))

            # XXX: Avatar processing may adjust file position; reset before saving.
            avatar.seek(0)
            photo.putfile(avatar)
    else:
        photo = None

    with atomic_transaction(
        using=(
            router.db_for_write(cls),
            router.db_for_write(File),
        )
    ):
        if relation.get("sentry_app") and color is not None:
            instance, created = cls.objects.get_or_create(**relation, color=color)
        else:
            instance, created = cls.objects.get_or_create(**relation)
        file = instance.get_file()
        if file and photo:
            file.delete()

        if photo:
            instance.file_id = photo.id
            instance.ident = uuid4().hex

        instance.avatar_type = [i for i, n in cls.AVATAR_TYPES if n == type][0]
        instance.save()

    if photo and not created:
        instance.clear_cached_photos()

    return instance

def delete(self, request: Request, project) -> Response:
    """
    Delete a specific Project's Debug Information File
    ```````````````````````````````````````````````````

    Delete a debug information file for a given project.

    :pparam string organization_slug: the slug of the organization the
                                      file belongs to.
    :pparam string project_slug: the slug of the project to delete the DIF.
    :qparam string id: The id of the DIF to delete.
    :auth: required
    """
    if request.GET.get("id") and (request.access.has_scope("project:write")):
        with atomic_transaction(using=router.db_for_write(File)):
            debug_file = (
                ProjectDebugFile.objects.filter(
                    id=request.GET.get("id"), project_id=project.id
                )
                .select_related("file")
                .first()
            )
            if debug_file is not None:
                debug_file.delete()
                return Response(status=204)

    return Response(status=404)

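# --- Hypothetical client-side sketch (not part of the original module). ---
# Illustrates the request shape described in the docstring above. The host,
# the "files/dsyms/" path segment, and the bearer-token header are assumptions
# made for illustration; only the ``id`` query parameter is taken from the code.
import requests


def _example_delete_dif(base_url, org_slug, project_slug, dif_id, token):
    # Issue a DELETE with the DIF id as the ``id`` query parameter,
    # mirroring ``request.GET.get("id")`` in the endpoint above.
    return requests.delete(
        f"{base_url}/api/0/projects/{org_slug}/{project_slug}/files/dsyms/",
        params={"id": dif_id},
        headers={"Authorization": f"Bearer {token}"},
        timeout=30,
    )
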
def _get_or_create_impl(cls, project, version, date_added, metric_tags):
    from sentry.models import Project

    if date_added is None:
        date_added = timezone.now()

    cache_key = cls.get_cache_key(project.organization_id, version)

    release = cache.get(cache_key)

    if release in (None, -1):
        # TODO(dcramer): if the cache result is -1 we could attempt a
        # default create here instead of default get
        project_version = (f"{project.slug}-{version}")[:DB_VERSION_LENGTH]
        releases = list(
            cls.objects.filter(
                organization_id=project.organization_id,
                version__in=[version, project_version],
                projects=project,
            )
        )

        if releases:
            try:
                release = [r for r in releases if r.version == project_version][0]
            except IndexError:
                release = releases[0]
            metric_tags["created"] = "false"
        else:
            try:
                with atomic_transaction(using=router.db_for_write(cls)):
                    release = cls.objects.create(
                        organization_id=project.organization_id,
                        version=version,
                        date_added=date_added,
                        total_deploys=0,
                    )

                metric_tags["created"] = "true"
            except IntegrityError:
                metric_tags["created"] = "false"
                release = cls.objects.get(
                    organization_id=project.organization_id, version=version
                )

        release.add_project(project)
        if not project.flags.has_releases:
            project.flags.has_releases = True
            project.update(flags=F("flags").bitor(Project.flags.has_releases))

        # TODO(dcramer): upon creating a new release, check if it should be
        # the new "latest release" for this project
        cache.set(cache_key, release, 3600)
        metric_tags["cache_hit"] = "false"
    else:
        metric_tags["cache_hit"] = "true"

    return release

def _upsert_release_file(
    file: File, archive: ReleaseArchive, update_fn, key_fields, additional_fields
) -> bool:
    success = False
    release_file = None

    # Release files must have unique names within their release
    # and dist. If a matching file already exists, replace its
    # file with the new one; otherwise create it.
    try:
        release_file = ReleaseFile.objects.get(**key_fields)
    except ReleaseFile.DoesNotExist:
        try:
            with atomic_transaction(using=router.db_for_write(ReleaseFile)):
                release_file = ReleaseFile.objects.create(
                    file=file, **dict(key_fields, **additional_fields)
                )
        except IntegrityError:
            # NB: This indicates a race, where another assemble task or
            # file upload job has just created a conflicting file. Since
            # we're upserting here anyway, yield to the faster actor and
            # do not try again.
            file.delete()
        else:
            success = True
    else:
        success = update_fn(release_file, file, archive, additional_fields)

    return success

def delete(self, request: Request, project) -> Response:
    """
    Delete an Archive
    ```````````````````````````````````````````````````

    Delete all artifacts inside the given archive.

    :pparam string organization_slug: the slug of the organization the
                                      archive belongs to.
    :pparam string project_slug: the slug of the project to delete the
                                 archive of.
    :qparam string name: The name of the archive to delete.
    :auth: required
    """
    archive_name = request.GET.get("name")

    if archive_name:
        with atomic_transaction(using=router.db_for_write(ReleaseFile)):
            release = Release.objects.get(
                organization_id=project.organization_id,
                projects=project,
                version=archive_name,
            )
            if release is not None:
                release_files = ReleaseFile.objects.filter(release_id=release.id)
                release_files.delete()
                return Response(status=204)

    return Response(status=404)

def store_export_chunk_as_blob(data_export, bytes_written, fileobj, blob_size=DEFAULT_BLOB_SIZE):
    try:
        with atomic_transaction(
            using=(
                router.db_for_write(FileBlob),
                router.db_for_write(ExportedDataBlob),
            )
        ):
            # adapted from `putfile` in `src/sentry/models/file.py`
            bytes_offset = 0
            while True:
                contents = fileobj.read(blob_size)
                if not contents:
                    return bytes_offset

                blob_fileobj = ContentFile(contents)
                blob = FileBlob.from_file(blob_fileobj, logger=logger)
                ExportedDataBlob.objects.get_or_create(
                    data_export=data_export,
                    blob_id=blob.id,
                    offset=bytes_written + bytes_offset,
                )

                bytes_offset += blob.size

                # There is a maximum file size allowed, so we need to make sure we don't exceed it.
                # NOTE: there seem to be issues with downloading files larger than 1 GB on slower
                # networks; limit the export to 1 GB for now to improve reliability.
                if bytes_written + bytes_offset >= min(MAX_FILE_SIZE, 2**30):
                    raise ExportDataFileTooBig()
    except ExportDataFileTooBig:
        return 0

def _ensure_blob_owned(blob):
    # NOTE: ``organization`` is a free variable here; in the original module this
    # helper is nested inside a function that receives the organization.
    if organization is None:
        return
    try:
        with atomic_transaction(using=router.db_for_write(FileBlobOwner)):
            FileBlobOwner.objects.create(organization_id=organization.id, blob=blob)
    except IntegrityError:
        pass

def writable_data(self, create: bool, initial_artifact_count=None):
    """Context manager for editable artifact index"""
    with atomic_transaction(
        using=(
            router.db_for_write(ReleaseFile),
            router.db_for_write(File),
        )
    ):
        created = False
        if create:
            releasefile, created = self._get_or_create_releasefile(initial_artifact_count)
        else:
            # Lock the row for editing:
            # NOTE: Do not select_related('file') here, because we do not
            # want to lock the File table
            qs = self._releasefile_qs().select_for_update()
            try:
                releasefile = qs[0]
            except IndexError:
                releasefile = None

        if releasefile is None:
            index_data = None
        else:
            if created:
                index_data = _ArtifactIndexData({}, fresh=True)
            else:
                source_file = releasefile.file
                if source_file.type != ARTIFACT_INDEX_TYPE:
                    raise RuntimeError("Unexpected file type for artifact index")
                raw_data = json.load(source_file.getfile())
                index_data = _ArtifactIndexData(raw_data)

        yield index_data  # editable reference to index

        if index_data is not None and index_data.changed:
            if created:
                target_file = releasefile.file
            else:
                target_file = File.objects.create(
                    name=ARTIFACT_INDEX_FILENAME, type=ARTIFACT_INDEX_TYPE
                )

            target_file.putfile(BytesIO(json.dumps(index_data.data).encode()))

            artifact_count = index_data.num_files
            if not created:
                # Update and clean existing
                old_file = releasefile.file
                releasefile.update(file=target_file, artifact_count=artifact_count)
                old_file.delete()

def add_project(self, project):
    """
    Add a project to this release.

    Returns True if the project was added and did not already exist.
    """
    from sentry.models import Project

    try:
        with atomic_transaction(using=router.db_for_write(ReleaseProject)):
            ReleaseProject.objects.create(project=project, release=self)
            if not project.flags.has_releases:
                project.flags.has_releases = True
                project.update(flags=F("flags").bitor(Project.flags.has_releases))
    except IntegrityError:
        return False
    else:
        return True

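# --- Hypothetical usage sketch (not part of the original module). ---
# Shows how the boolean return value of ``add_project`` distinguishes a new
# association from an existing one; assumes a configured Sentry/Django
# environment where ``release`` and ``project`` belong to the same organization.
def _example_link_release(release, project, logger):
    if release.add_project(project):
        # A new ReleaseProject row was created by this call.
        logger.info("release.project_linked", extra={"project_id": project.id})
    else:
        # The row already existed; the IntegrityError was swallowed inside
        # add_project and nothing changed.
        logger.debug("release.project_already_linked", extra={"project_id": project.id})
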
def delete_unreferenced_blobs(blob_ids):
    from sentry.models import FileBlob, FileBlobIndex

    for blob_id in blob_ids:
        if FileBlobIndex.objects.filter(blob_id=blob_id).exists():
            continue
        try:
            blob = FileBlob.objects.get(id=blob_id)
        except FileBlob.DoesNotExist:
            pass
        else:
            try:
                with atomic_transaction(using=router.db_for_write(FileBlob)):
                    # Need to delete the record to ensure django hooks run.
                    blob.delete()
            except IntegrityError:
                # Do nothing if the blob was deleted in another task, or
                # if it had another reference added concurrently.
                pass

def assemble_from_file_blob_ids(self, file_blob_ids, checksum, commit=True):
    """
    This creates a file from file blobs and returns a temp file with the
    contents.
    """
    tf = tempfile.NamedTemporaryFile()
    with atomic_transaction(
        using=(
            router.db_for_write(FileBlob),
            router.db_for_write(FileBlobIndex),
        )
    ):
        file_blobs = FileBlob.objects.filter(id__in=file_blob_ids).all()

        # Ensure blobs are in the order and duplication as provided
        blobs_by_id = {blob.id: blob for blob in file_blobs}
        file_blobs = [blobs_by_id[blob_id] for blob_id in file_blob_ids]

        new_checksum = sha1(b"")
        offset = 0
        for blob in file_blobs:
            FileBlobIndex.objects.create(file=self, blob=blob, offset=offset)
            with blob.getfile() as blobfile:
                for chunk in blobfile.chunks():
                    new_checksum.update(chunk)
                    tf.write(chunk)
            offset += blob.size

        self.size = offset
        self.checksum = new_checksum.hexdigest()

        if checksum != self.checksum:
            raise AssembleChecksumMismatch("Checksum mismatch")

    metrics.timing("filestore.file-size", offset)
    if commit:
        self.save()
    tf.flush()
    tf.seek(0)
    return tf

def set_commits(self, commit_list):
    """
    Bind a list of commits to this release.

    This will clear any existing commit log and replace it with the given
    commits.
    """
    # Sort commit list in reverse order
    commit_list.sort(key=lambda commit: commit.get("timestamp", 0), reverse=True)

    # TODO(dcramer): this function could use some cleanup/refactoring as it's a bit unwieldy
    from sentry.models import (
        Commit,
        CommitAuthor,
        Group,
        GroupLink,
        GroupResolution,
        GroupStatus,
        PullRequest,
        ReleaseCommit,
        ReleaseHeadCommit,
        Repository,
    )
    from sentry.plugins.providers.repository import RepositoryProvider
    from sentry.tasks.integrations import kick_off_status_syncs

    # todo(meredith): implement for IntegrationRepositoryProvider
    commit_list = [
        c
        for c in commit_list
        if not RepositoryProvider.should_ignore_commit(c.get("message", ""))
    ]
    lock_key = type(self).get_lock_key(self.organization_id, self.id)
    lock = locks.get(lock_key, duration=10)
    if lock.locked():
        # Signal failure to the consumer rapidly. This aims to limit the number
        # of timeouts and prevent web worker exhaustion when customers create
        # the same release rapidly for different projects.
        raise ReleaseCommitError

    with TimedRetryPolicy(10)(lock.acquire):
        start = time()
        with atomic_transaction(
            using=(
                router.db_for_write(type(self)),
                router.db_for_write(ReleaseCommit),
                router.db_for_write(Repository),
                router.db_for_write(CommitAuthor),
                router.db_for_write(Commit),
            )
        ):
            # TODO(dcramer): would be good to optimize the logic to avoid these
            # deletes but not overly important
            ReleaseCommit.objects.filter(release=self).delete()

            authors = {}
            repos = {}
            commit_author_by_commit = {}
            head_commit_by_repo = {}
            latest_commit = None
            for idx, data in enumerate(commit_list):
                repo_name = data.get("repository") or f"organization-{self.organization_id}"
                if repo_name not in repos:
                    repos[repo_name] = repo = Repository.objects.get_or_create(
                        organization_id=self.organization_id, name=repo_name
                    )[0]
                else:
                    repo = repos[repo_name]

                author_email = data.get("author_email")
                if author_email is None and data.get("author_name"):
                    author_email = (
                        re.sub(r"[^a-zA-Z0-9\-_\.]*", "", data["author_name"]).lower()
                        + "@localhost"
                    )

                author_email = truncatechars(author_email, 75)

                if not author_email:
                    author = None
                elif author_email not in authors:
                    author_data = {"name": data.get("author_name")}
                    author, created = CommitAuthor.objects.get_or_create(
                        organization_id=self.organization_id,
                        email=author_email,
                        defaults=author_data,
                    )
                    if author.name != author_data["name"]:
                        author.update(name=author_data["name"])
                    authors[author_email] = author
                else:
                    author = authors[author_email]

                commit_data = {}

                # Update/set message and author if they are provided.
                if author is not None:
                    commit_data["author"] = author
                if "message" in data:
                    commit_data["message"] = data["message"]
                if "timestamp" in data:
                    commit_data["date_added"] = data["timestamp"]

                commit, created = Commit.objects.get_or_create(
                    organization_id=self.organization_id,
                    repository_id=repo.id,
                    key=data["id"],
                    defaults=commit_data,
                )
                if not created:
                    commit_data = {
                        key: value
                        for key, value in commit_data.items()
                        if getattr(commit, key) != value
                    }
                    if commit_data:
                        commit.update(**commit_data)

                if author is None:
                    author = commit.author

                commit_author_by_commit[commit.id] = author

                # Guard against patch_set being None
                patch_set = data.get("patch_set") or []
                if patch_set:
                    CommitFileChange.objects.bulk_create(
                        [
                            CommitFileChange(
                                organization_id=self.organization.id,
                                commit=commit,
                                filename=patched_file["path"],
                                type=patched_file["type"],
                            )
                            for patched_file in patch_set
                        ],
                        ignore_conflicts=True,
                    )

                try:
                    with atomic_transaction(using=router.db_for_write(ReleaseCommit)):
                        ReleaseCommit.objects.create(
                            organization_id=self.organization_id,
                            release=self,
                            commit=commit,
                            order=idx,
                        )
                except IntegrityError:
                    pass

                if latest_commit is None:
                    latest_commit = commit

                head_commit_by_repo.setdefault(repo.id, commit.id)

            self.update(
                commit_count=len(commit_list),
                authors=[
                    str(a_id)
                    for a_id in ReleaseCommit.objects.filter(
                        release=self, commit__author_id__isnull=False
                    )
                    .values_list("commit__author_id", flat=True)
                    .distinct()
                ],
                last_commit_id=latest_commit.id if latest_commit else None,
            )
            metrics.timing("release.set_commits.duration", time() - start)

    # fill any missing ReleaseHeadCommit entries
    for repo_id, commit_id in head_commit_by_repo.items():
        try:
            with atomic_transaction(using=router.db_for_write(ReleaseHeadCommit)):
                ReleaseHeadCommit.objects.create(
                    organization_id=self.organization_id,
                    release_id=self.id,
                    repository_id=repo_id,
                    commit_id=commit_id,
                )
        except IntegrityError:
            pass

    release_commits = list(
        ReleaseCommit.objects.filter(release=self)
        .select_related("commit")
        .values("commit_id", "commit__key")
    )

    commit_resolutions = list(
        GroupLink.objects.filter(
            linked_type=GroupLink.LinkedType.commit,
            linked_id__in=[rc["commit_id"] for rc in release_commits],
        ).values_list("group_id", "linked_id")
    )

    commit_group_authors = [
        (cr[0], commit_author_by_commit.get(cr[1])) for cr in commit_resolutions  # group_id
    ]

    pr_ids_by_merge_commit = list(
        PullRequest.objects.filter(
            merge_commit_sha__in=[rc["commit__key"] for rc in release_commits],
            organization_id=self.organization_id,
        ).values_list("id", flat=True)
    )

    pull_request_resolutions = list(
        GroupLink.objects.filter(
            relationship=GroupLink.Relationship.resolves,
            linked_type=GroupLink.LinkedType.pull_request,
            linked_id__in=pr_ids_by_merge_commit,
        ).values_list("group_id", "linked_id")
    )

    pr_authors = list(
        PullRequest.objects.filter(
            id__in=[prr[1] for prr in pull_request_resolutions]
        ).select_related("author")
    )

    pr_authors_dict = {pra.id: pra.author for pra in pr_authors}

    pull_request_group_authors = [
        (prr[0], pr_authors_dict.get(prr[1])) for prr in pull_request_resolutions
    ]

    user_by_author = {None: None}

    commits_and_prs = list(itertools.chain(commit_group_authors, pull_request_group_authors))

    group_project_lookup = dict(
        Group.objects.filter(
            id__in=[group_id for group_id, _ in commits_and_prs]
        ).values_list("id", "project_id")
    )

    for group_id, author in commits_and_prs:
        if author not in user_by_author:
            try:
                user_by_author[author] = author.find_users()[0]
            except IndexError:
                user_by_author[author] = None
        actor = user_by_author[author]

        with atomic_transaction(
            using=(
                router.db_for_write(GroupResolution),
                router.db_for_write(Group),
                # inside the remove_group_from_inbox
                router.db_for_write(GroupInbox),
                router.db_for_write(Activity),
            )
        ):
            GroupResolution.objects.create_or_update(
                group_id=group_id,
                values={
                    "release": self,
                    "type": GroupResolution.Type.in_release,
                    "status": GroupResolution.Status.resolved,
                    "actor_id": actor.id if actor else None,
                },
            )
            group = Group.objects.get(id=group_id)
            group.update(status=GroupStatus.RESOLVED)
            remove_group_from_inbox(group, action=GroupInboxRemoveAction.RESOLVED, user=actor)
            record_group_history(group, GroupHistoryStatus.RESOLVED, actor=actor)
            metrics.incr("group.resolved", instance="in_commit", skip_internal=True)

        issue_resolved.send_robust(
            organization_id=self.organization_id,
            user=actor,
            group=group,
            project=group.project,
            resolution_type="with_commit",
            sender=type(self),
        )

        kick_off_status_syncs.apply_async(
            kwargs={"project_id": group_project_lookup[group_id], "group_id": group_id}
        )

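# --- Hypothetical input sketch (not part of the original module). ---
# The keys below are exactly the ones set_commits() reads; the concrete values
# and the timestamp format are illustrative assumptions.
EXAMPLE_COMMIT_LIST = [
    {
        "id": "a1b2c3d4e5f6",                    # stored as Commit.key
        "repository": "example/backend",         # optional; defaults to "organization-<org id>"
        "author_name": "Jane Doe",
        "author_email": "jane.doe@example.com",  # derived from author_name when missing
        "message": "Fix crash when assembling artifact bundles",
        "timestamp": "2021-10-01T12:00:00Z",     # sorted on and stored as Commit.date_added
        "patch_set": [{"path": "src/example/app.py", "type": "M"}],
    },
]
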
def post_releasefile(request, release, logger):
    if "file" not in request.data:
        return Response({"detail": "Missing uploaded file"}, status=400)

    fileobj = request.data["file"]

    full_name = request.data.get("name", fileobj.name)
    if not full_name or full_name == "file":
        return Response({"detail": "File name must be specified"}, status=400)

    name = full_name.rsplit("/", 1)[-1]

    if _filename_re.search(name):
        return Response(
            {"detail": "File name must not contain special whitespace characters"},
            status=400,
        )

    dist_name = request.data.get("dist")
    dist = None
    if dist_name:
        dist = release.add_dist(dist_name)

    # Quickly check for the presence of this file before continuing with
    # the costly file upload process.
    if ReleaseFile.objects.filter(
        organization_id=release.organization_id,
        release_id=release.id,
        name=full_name,
        dist_id=dist.id if dist else dist,
    ).exists():
        return Response({"detail": ERR_FILE_EXISTS}, status=409)

    headers = {"Content-Type": fileobj.content_type}
    for headerval in request.data.getlist("header") or ():
        try:
            k, v = headerval.split(":", 1)
        except ValueError:
            return Response({"detail": "header value was not formatted correctly"}, status=400)
        else:
            if _filename_re.search(v):
                return Response(
                    {"detail": "header value must not contain special whitespace characters"},
                    status=400,
                )
            headers[k] = v.strip()

    file = File.objects.create(name=name, type="release.file", headers=headers)
    file.putfile(fileobj, logger=logger)

    try:
        with atomic_transaction(using=router.db_for_write(ReleaseFile)):
            releasefile = ReleaseFile.objects.create(
                organization_id=release.organization_id,
                release_id=release.id,
                file=file,
                name=full_name,
                dist_id=dist.id if dist else dist,
            )
    except IntegrityError:
        file.delete()
        return Response({"detail": ERR_FILE_EXISTS}, status=409)

    return Response(serialize(releasefile, request.user), status=201)

def dispatch(self, request):
    try:
        event_id = request.GET["eventId"]
    except KeyError:
        return self._smart_response(
            request, {"eventId": "Missing or invalid parameter."}, status=400
        )

    normalized_event_id = normalize_event_id(event_id)
    if normalized_event_id:
        event_id = normalized_event_id
    elif event_id:
        return self._smart_response(
            request, {"eventId": "Missing or invalid parameter."}, status=400
        )

    key = self._get_project_key(request)
    if not key:
        return self._smart_response(
            request, {"dsn": "Missing or invalid parameter."}, status=404
        )

    origin = self._get_origin(request)
    if not is_valid_origin(origin, key.project):
        return self._smart_response(request, status=403)

    if request.method == "OPTIONS":
        return self._smart_response(request)

    # customization options
    options = DEFAULT_OPTIONS.copy()
    for name in options.keys():
        if name in request.GET:
            options[name] = str(request.GET[name])

    # TODO(dcramer): since we can't use a csrf cookie we should at the very
    # least sign the request / add some kind of nonce
    initial = {"name": request.GET.get("name"), "email": request.GET.get("email")}

    form = UserReportForm(request.POST if request.method == "POST" else None, initial=initial)
    if form.is_valid():
        # TODO(dcramer): move this to post to the internal API
        report = form.save(commit=False)
        report.project_id = key.project_id
        report.event_id = event_id

        event = eventstore.get_event_by_id(report.project_id, report.event_id)

        if event is not None:
            report.environment_id = event.get_environment().id
            report.group_id = event.group_id

        try:
            with atomic_transaction(using=router.db_for_write(UserReport)):
                report.save()
        except IntegrityError:
            # There was a duplicate, so just overwrite the existing
            # row with the new one. The only way this ever happens is
            # if someone is messing around with the API, or doing
            # something wrong with the SDK, but this behavior is
            # more reasonable than just hard erroring and is more
            # expected.
            UserReport.objects.filter(
                project_id=report.project_id, event_id=report.event_id
            ).update(
                name=report.name,
                email=report.email,
                comments=report.comments,
                date_added=timezone.now(),
            )
        else:
            if report.group_id:
                report.notify()

        user_feedback_received.send(
            project=Project.objects.get(id=report.project_id),
            sender=self,
        )

        return self._smart_response(request)
    elif request.method == "POST":
        return self._smart_response(request, {"errors": dict(form.errors)}, status=400)

    show_branding = (
        ProjectOption.objects.get_value(
            project=key.project, key="feedback:branding", default="1"
        )
        == "1"
    )

    template = render_to_string(
        "sentry/error-page-embed.html",
        context={
            "form": form,
            "show_branding": show_branding,
            "title": options["title"],
            "subtitle": options["subtitle"],
            "subtitle2": options["subtitle2"],
            "name_label": options["labelName"],
            "email_label": options["labelEmail"],
            "comments_label": options["labelComments"],
            "submit_label": options["labelSubmit"],
            "close_label": options["labelClose"],
        },
    )

    context = {
        "endpoint": mark_safe("*/" + json.dumps(absolute_uri(request.get_full_path())) + ";/*"),
        "template": mark_safe("*/" + json.dumps(template) + ";/*"),
        "strings": mark_safe(
            "*/"
            + json.dumps_htmlsafe(
                {
                    "generic_error": str(options["errorGeneric"]),
                    "form_error": str(options["errorFormEntry"]),
                    "sent_message": str(options["successMessage"]),
                }
            )
            + ";/*"
        ),
    }

    return render_to_response(
        "sentry/error-page-embed.js", context, request, content_type="text/javascript"
    )

def merge_export_blobs(data_export_id, **kwargs):
    with sentry_sdk.start_span(op="merge"):
        try:
            data_export = ExportedData.objects.get(id=data_export_id)
        except ExportedData.DoesNotExist as error:
            logger.exception(error)
            return

        with sentry_sdk.configure_scope() as scope:
            if data_export.user:
                user = {}
                if data_export.user.id:
                    user["id"] = data_export.user.id
                if data_export.user.username:
                    user["username"] = data_export.user.username
                if data_export.user.email:
                    user["email"] = data_export.user.email
                scope.user = user
            scope.set_tag("organization.slug", data_export.organization.slug)
            scope.set_tag("export.type", ExportQueryType.as_str(data_export.query_type))
            scope.set_extra("export.query", data_export.query_info)

        # adapted from `putfile` in `src/sentry/models/file.py`
        try:
            with atomic_transaction(
                using=(
                    router.db_for_write(File),
                    router.db_for_write(FileBlobIndex),
                )
            ):
                file = File.objects.create(
                    name=data_export.file_name,
                    type="export.csv",
                    headers={"Content-Type": "text/csv"},
                )
                size = 0
                file_checksum = sha1(b"")

                for export_blob in ExportedDataBlob.objects.filter(
                    data_export=data_export
                ).order_by("offset"):
                    blob = FileBlob.objects.get(pk=export_blob.blob_id)
                    FileBlobIndex.objects.create(file=file, blob=blob, offset=size)
                    size += blob.size
                    blob_checksum = sha1(b"")

                    for chunk in blob.getfile().chunks():
                        blob_checksum.update(chunk)
                        file_checksum.update(chunk)

                    if blob.checksum != blob_checksum.hexdigest():
                        raise AssembleChecksumMismatch("Checksum mismatch")

                file.size = size
                file.checksum = file_checksum.hexdigest()
                file.save()

            # This is in a separate atomic transaction because in prod, files exist
            # outside of the primary database, which means that the transaction to
            # the primary database is idle the entire time the writes to the files
            # database are happening. If the writes to the files database take longer
            # than the idle timeout, the connection to the primary database can time
            # out, causing a failure.
            with atomic_transaction(using=router.db_for_write(ExportedData)):
                data_export.finalize_upload(file=file)

            time_elapsed = (timezone.now() - data_export.date_added).total_seconds()
            metrics.timing("dataexport.duration", time_elapsed, sample_rate=1.0)
            logger.info("dataexport.end", extra={"data_export_id": data_export_id})
            metrics.incr("dataexport.end", tags={"success": True}, sample_rate=1.0)
        except Exception as error:
            metrics.incr("dataexport.error", tags={"error": str(error)}, sample_rate=1.0)
            metrics.incr(
                "dataexport.end",
                tags={"success": False, "error": str(error)},
                sample_rate=1.0,
            )
            logger.error(
                "dataexport.error: %s",
                str(error),
                extra={"query": data_export.payload, "org": data_export.organization_id},
            )
            capture_exception(error)

            if isinstance(error, IntegrityError):
                message = "Failed to save the assembled file."
            else:
                message = "Internal processing failure."
            return data_export.email_failure(message=message)

def save_userreport(project, report, start_time=None):
    if start_time is None:
        start_time = timezone.now()

    # XXX(dcramer): enforce case insensitivity by coercing this to a lowercase string
    report["event_id"] = report["event_id"].lower()
    report["project_id"] = project.id

    event = eventstore.get_event_by_id(project.id, report["event_id"])

    # TODO(dcramer): we should probably create the user if they don't
    # exist, and ideally we'd also associate that with the event
    euser = find_event_user(report, event)

    if euser and not euser.name and report.get("name"):
        euser.update(name=report["name"])
    if euser:
        report["event_user_id"] = euser.id

    if event:
        # if the event is more than 30 minutes old, we don't allow updates
        # as it might be abusive
        if event.datetime < start_time - timedelta(minutes=30):
            raise Conflict("Feedback for this event cannot be modified.")

        report["environment_id"] = event.get_environment().id
        report["group_id"] = event.group_id

    try:
        with atomic_transaction(using=router.db_for_write(UserReport)):
            report_instance = UserReport.objects.create(**report)
    except IntegrityError:
        # There was a duplicate, so just overwrite the existing
        # row with the new one. The only way this ever happens is
        # if someone is messing around with the API, or doing
        # something wrong with the SDK, but this behavior is
        # more reasonable than just hard erroring and is more
        # expected.
        existing_report = UserReport.objects.get(
            project_id=report["project_id"], event_id=report["event_id"]
        )

        # if the existing report was submitted more than 5 minutes ago, we don't
        # allow updates as it might be abusive (replay attacks)
        if existing_report.date_added < timezone.now() - timedelta(minutes=5):
            raise Conflict("Feedback for this event cannot be modified.")

        existing_report.update(
            name=report.get("name", ""),
            email=report["email"],
            comments=report["comments"],
            date_added=timezone.now(),
            event_user_id=euser.id if euser else None,
        )
        report_instance = existing_report
    else:
        if report_instance.group_id:
            report_instance.notify()

    user_feedback_received.send(project=project, sender=save_userreport)

    return report_instance

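# --- Hypothetical input sketch (not part of the original module). ---
# Minimal ``report`` dict accepted by save_userreport(); the function itself
# fills in project_id, environment_id, group_id, and event_user_id. The values
# below are illustrative only.
EXAMPLE_USER_REPORT = {
    "event_id": "36C48BBACA724486A8A1C934B44FDC21",  # coerced to lowercase by the function
    "name": "Jane Doe",
    "email": "jane.doe@example.com",
    "comments": "The export page crashed when I clicked download.",
}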