def get(self, spectrum_id):
    """
    ---
    description: Retrieve a spectrum
    tags:
      - spectra
    parameters:
      - in: path
        name: spectrum_id
        required: true
        schema:
          type: integer
    responses:
      200:
        content:
          application/json:
            schema: SingleSpectrum
      400:
        content:
          application/json:
            schema: Error
    """
    spectrum = Spectrum.get_if_accessible_by(
        spectrum_id, self.current_user, raise_if_none=True
    )

    spec_dict = recursive_to_dict(spectrum)
    spec_dict["instrument_name"] = spectrum.instrument.name
    spec_dict["groups"] = spectrum.groups
    spec_dict["reducers"] = spectrum.reducers
    spec_dict["observers"] = spectrum.observers
    spec_dict["owner"] = spectrum.owner

    self.verify_and_commit()
    return self.success(data=spec_dict)
def get(self, spectrum_id):
    """
    ---
    description: Retrieve a spectrum
    tags:
      - spectra
    parameters:
      - in: path
        name: spectrum_id
        required: true
        schema:
          type: integer
    responses:
      200:
        content:
          application/json:
            schema: SingleSpectrum
      400:
        content:
          application/json:
            schema: Error
    """
    spectrum = Spectrum.get_if_accessible_by(
        spectrum_id,
        self.current_user,
        raise_if_none=True,
    )
    comments = (
        CommentOnSpectrum.query_records_accessible_by(
            self.current_user,
            options=[joinedload(CommentOnSpectrum.groups)],
        )
        .filter(CommentOnSpectrum.spectrum_id == spectrum_id)
        .all()
    )
    annotations = (
        AnnotationOnSpectrum.query_records_accessible_by(self.current_user)
        .filter(AnnotationOnSpectrum.spectrum_id == spectrum_id)
        .all()
    )

    spec_dict = recursive_to_dict(spectrum)
    spec_dict["instrument_name"] = spectrum.instrument.name
    spec_dict["groups"] = spectrum.groups
    spec_dict["reducers"] = spectrum.reducers
    spec_dict["observers"] = spectrum.observers
    spec_dict["owner"] = spectrum.owner
    spec_dict["comments"] = comments
    spec_dict["annotations"] = annotations

    external_reducer = (
        DBSession()
        .query(SpectrumReducer.external_reducer)
        .filter(SpectrumReducer.spectr_id == spectrum_id)
        .first()
    )
    if external_reducer is not None:
        spec_dict["external_reducer"] = external_reducer[0]

    external_observer = (
        DBSession()
        .query(SpectrumObserver.external_observer)
        .filter(SpectrumObserver.spectr_id == spectrum_id)
        .first()
    )
    if external_observer is not None:
        spec_dict["external_observer"] = external_observer[0]

    self.verify_and_commit()
    return self.success(data=spec_dict)
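# A minimal client-side sketch of calling the handler above. The route
# ("/api/spectrum/<spectrum_id>") and the token-auth header follow the usual
# SkyPortal conventions, but the base URL, token, and spectrum ID below are
# placeholders -- adjust them for your deployment.
import requests

BASE_URL = "https://skyportal.example.com"  # hypothetical instance
HEADERS = {"Authorization": "token abc123"}  # placeholder API token

response = requests.get(f"{BASE_URL}/api/spectrum/42", headers=HEADERS)
response.raise_for_status()
spec = response.json()["data"]  # handlers wrap results as {"status", "data"}
print(spec["instrument_name"], len(spec["comments"]), len(spec["annotations"]))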
def get(self, obj_id=None):
    """
    ---
    single:
      description: Retrieve a candidate
      tags:
        - candidates
      parameters:
        - in: path
          name: obj_id
          required: true
          schema:
            type: string
        - in: query
          name: includeComments
          nullable: true
          schema:
            type: boolean
          description: |
            Boolean indicating whether to include associated comments.
            Defaults to false.
      responses:
        200:
          content:
            application/json:
              schema: SingleObj
        400:
          content:
            application/json:
              schema: Error
    multiple:
      tags:
        - candidates
      description: Retrieve all candidates
      parameters:
        - in: query
          name: numPerPage
          nullable: true
          schema:
            type: integer
          description: |
            Number of candidates to return per paginated request.
            Defaults to 25. Capped at 500.
        - in: query
          name: pageNumber
          nullable: true
          schema:
            type: integer
          description: Page number for paginated query results. Defaults to 1
        - in: query
          name: totalMatches
          nullable: true
          schema:
            type: integer
          description: |
            Used only in the case of paginating query results - if provided, this
            allows for avoiding a potentially expensive query.count() call.
        - in: query
          name: savedStatus
          nullable: true
          schema:
            type: string
            enum: [all, savedToAllSelected, savedToAnySelected, savedToAnyAccessible, notSavedToAnyAccessible, notSavedToAnySelected, notSavedToAllSelected]
          description: |
            String indicating the saved status to filter candidate results for.
            Must be one of the enumerated values.
        - in: query
          name: startDate
          nullable: true
          schema:
            type: string
          description: |
            Arrow-parseable date string (e.g. 2020-01-01). If provided, filter by
            Candidate.passed_at >= startDate
        - in: query
          name: endDate
          nullable: true
          schema:
            type: string
          description: |
            Arrow-parseable date string (e.g. 2020-01-01). If provided, filter by
            Candidate.passed_at <= endDate
        - in: query
          name: groupIDs
          nullable: true
          schema:
            type: array
            items:
              type: integer
          explode: false
          style: simple
          description: |
            Comma-separated string of group IDs (e.g. "1,2"). Defaults to all of
            user's groups if filterIDs is not provided.
        - in: query
          name: filterIDs
          nullable: true
          schema:
            type: array
            items:
              type: integer
          explode: false
          style: simple
          description: |
            Comma-separated string of filter IDs (e.g. "1,2"). Defaults to all of
            user's groups' filters if groupIDs is not provided.
        - in: query
          name: annotationExcludeOrigin
          nullable: true
          schema:
            type: string
          description: |
            Only load objects that do not have annotations from this origin.
            If the annotationsExcludeOutdatedDate is also given, then annotations
            with this origin will still be loaded if they were modified before
            that date.
        - in: query
          name: annotationExcludeOutdatedDate
          nullable: true
          schema:
            type: string
          description: |
            An Arrow parseable string designating when an existing annotation is
            outdated. Only relevant if giving the annotationExcludeOrigin argument.
            Will treat objects with outdated annotations as if they did not have
            that annotation, so it will load an object if it doesn't have an
            annotation with the origin specified or if it does have it but the
            annotation modified date < annotationsExcludeOutdatedDate
        - in: query
          name: sortByAnnotationOrigin
          nullable: true
          schema:
            type: string
          description: |
            The origin of the Annotation to sort by
        - in: query
          name: sortByAnnotationKey
          nullable: true
          schema:
            type: string
          description: |
            The key of the Annotation data value to sort by
        - in: query
          name: sortByAnnotationOrder
          nullable: true
          schema:
            type: string
          description: |
            The sort order for annotations - either "asc" or "desc".
            Defaults to "asc".
        - in: query
          name: annotationFilterList
          nullable: true
          schema:
            type: array
            items:
              type: string
          explode: false
          style: simple
          description: |
            Comma-separated string of JSON objects representing annotation filters.
            Filter objects are expected to have keys { origin, key, value } for
            non-numeric value types, or { origin, key, min, max } for numeric values.
        - in: query
          name: includePhotometry
          nullable: true
          schema:
            type: boolean
          description: |
            Boolean indicating whether to include associated photometry.
            Defaults to false.
        - in: query
          name: includeSpectra
          nullable: true
          schema:
            type: boolean
          description: |
            Boolean indicating whether to include associated spectra.
            Defaults to false.
        - in: query
          name: includeComments
          nullable: true
          schema:
            type: boolean
          description: |
            Boolean indicating whether to include associated comments.
            Defaults to false.
        - in: query
          name: classifications
          nullable: true
          schema:
            type: array
            items:
              type: string
          explode: false
          style: simple
          description: |
            Comma-separated string of classification(s) to filter for candidates
            matching that/those classification(s).
        - in: query
          name: minRedshift
          nullable: true
          schema:
            type: number
          description: |
            If provided, return only candidates with a redshift of at least
            this value
        - in: query
          name: maxRedshift
          nullable: true
          schema:
            type: number
          description: |
            If provided, return only candidates with a redshift of at most
            this value
        - in: query
          name: listName
          nullable: true
          schema:
            type: string
          description: |
            Get only candidates saved to the querying user's list, e.g., "favorites".
        - in: query
          name: listNameReject
          nullable: true
          schema:
            type: string
          description: |
            Get only candidates that ARE NOT saved to the querying user's list,
            e.g., "rejected_candidates".
      responses:
        200:
          content:
            application/json:
              schema:
                allOf:
                  - $ref: '#/components/schemas/Success'
                  - type: object
                    properties:
                      data:
                        type: object
                        properties:
                          candidates:
                            type: array
                            items:
                              allOf:
                                - $ref: '#/components/schemas/Obj'
                                - type: object
                                  properties:
                                    is_source:
                                      type: boolean
                          totalMatches:
                            type: integer
                          pageNumber:
                            type: integer
                          numPerPage:
                            type: integer
        400:
          content:
            application/json:
              schema: Error
    """
    user_accessible_group_ids = [g.id for g in self.current_user.accessible_groups]
    include_photometry = self.get_query_argument("includePhotometry", False)
    include_spectra = self.get_query_argument("includeSpectra", False)
    include_comments = self.get_query_argument("includeComments", False)

    if obj_id is not None:
        query_options = [joinedload(Obj.thumbnails), joinedload(Obj.photstats)]

        c = Obj.get_if_accessible_by(
            obj_id,
            self.current_user,
            options=query_options,
        )
        if c is None:
            return self.error("Invalid ID")
        accessible_candidates = (
            Candidate.query_records_accessible_by(self.current_user)
            .filter(Candidate.obj_id == obj_id)
            .all()
        )
        filter_ids = [cand.filter_id for cand in accessible_candidates]

        passing_alerts = [
            {
                "filter_id": cand.filter_id,
                "passing_alert_id": cand.passing_alert_id,
                "passed_at": cand.passed_at,
            }
            for cand in accessible_candidates
        ]

        candidate_info = recursive_to_dict(c)
        candidate_info["filter_ids"] = filter_ids
        candidate_info["passing_alerts"] = passing_alerts
        if include_comments:
            candidate_info["comments"] = sorted(
                Comment.query_records_accessible_by(self.current_user)
                .filter(Comment.obj_id == obj_id)
                .all(),
                key=lambda x: x.created_at,
                reverse=True,
            )
        if include_photometry:
            candidate_info['photometry'] = (
                Photometry.query_records_accessible_by(
                    self.current_user,
                    mode='read',
                    options=[joinedload(Photometry.instrument)],
                )
                .filter(Photometry.obj_id == obj_id)
                .all()
            )
        if include_spectra:
            candidate_info['spectra'] = (
                Spectrum.query_records_accessible_by(
                    self.current_user,
                    mode='read',
                    options=[joinedload(Spectrum.instrument)],
                )
                .filter(Spectrum.obj_id == obj_id)
                .all()
            )

        candidate_info["annotations"] = sorted(
            Annotation.query_records_accessible_by(self.current_user).filter(
                Annotation.obj_id == obj_id
            ),
            key=lambda x: x.origin,
        )
        candidate_info["is_source"] = (
            Source.query_records_accessible_by(self.current_user)
            .filter(Source.obj_id == obj_id)
            .count()
            > 0
        )
        if candidate_info["is_source"]:
            source_subquery = (
                Source.query_records_accessible_by(self.current_user)
                .filter(Source.obj_id == obj_id)
                .filter(Source.active.is_(True))
                .subquery()
            )
            candidate_info["saved_groups"] = (
                Group.query_records_accessible_by(self.current_user)
                .join(source_subquery, Group.id == source_subquery.c.group_id)
                .all()
            )
            candidate_info["classifications"] = (
                Classification.query_records_accessible_by(self.current_user)
                .filter(Classification.obj_id == obj_id)
                .all()
            )
        if len(c.photstats) > 0:
            candidate_info["last_detected_at"] = Time(
                c.photstats[-1].last_detected_mjd, format='mjd'
            ).datetime
        else:
            candidate_info["last_detected_at"] = None
        candidate_info["gal_lon"] = c.gal_lon_deg
        candidate_info["gal_lat"] = c.gal_lat_deg
        candidate_info["luminosity_distance"] = c.luminosity_distance
        candidate_info["dm"] = c.dm
        candidate_info["angular_diameter_distance"] = c.angular_diameter_distance

        candidate_info = recursive_to_dict(candidate_info)
        self.verify_and_commit()
        return self.success(data=candidate_info)

    page_number = self.get_query_argument("pageNumber", None) or 1
    n_per_page = self.get_query_argument("numPerPage", None) or 25
    # Not documented in API docs as this is for frontend-only usage & will confuse
    # users looking through the API docs
    query_id = self.get_query_argument("queryID", None)
    saved_status = self.get_query_argument("savedStatus", "all")
    total_matches = self.get_query_argument("totalMatches", None)
    start_date = self.get_query_argument("startDate", None)
    end_date = self.get_query_argument("endDate", None)
    group_ids = self.get_query_argument("groupIDs", None)
    filter_ids = self.get_query_argument("filterIDs", None)
    annotation_exclude_origin = self.get_query_argument(
        'annotationExcludeOrigin', None
    )
    annotation_exclude_date = self.get_query_argument(
        'annotationExcludeOutdatedDate', None
    )
    sort_by_origin = self.get_query_argument("sortByAnnotationOrigin", None)
    annotation_filter_list = self.get_query_argument("annotationFilterList", None)
    classifications = self.get_query_argument("classifications", None)
    min_redshift = self.get_query_argument("minRedshift", None)
    max_redshift = self.get_query_argument("maxRedshift", None)
    list_name = self.get_query_argument('listName', None)
    list_name_reject = self.get_query_argument('listNameReject', None)

    with self.Session() as session:
        user_accessible_group_ids = [
            g.id for g in self.current_user.accessible_groups
        ]
        user_accessible_filter_ids = [
            filtr.id
            for g in self.current_user.accessible_groups
            for filtr in g.filters
            if g.filters is not None
        ]
        if group_ids is not None:
            if (
                isinstance(group_ids, str)
                and "," in group_ids
                and set(group_ids).issubset(string.digits + ',')
            ):
                group_ids = [int(g_id) for g_id in group_ids.split(",")]
            elif isinstance(group_ids, str) and group_ids.isdigit():
                group_ids = [int(group_ids)]
            else:
                return self.error(
                    "Invalid groupIDs value -- select at least one group"
                )
            filters = session.scalars(
                Filter.select(self.current_user).where(
                    Filter.group_id.in_(group_ids)
                )
            ).all()
            filter_ids = [f.id for f in filters]
        elif filter_ids is not None:
            if "," in filter_ids and set(filter_ids).issubset(
                string.digits + ','
            ):
                filter_ids = [int(f_id) for f_id in filter_ids.split(",")]
            elif filter_ids.isdigit():
                filter_ids = [int(filter_ids)]
            else:
                return self.error("Invalid filterIDs parameter value.")
            filters = session.scalars(
                Filter.select(self.current_user).where(Filter.id.in_(filter_ids))
            ).all()
            group_ids = [f.group_id for f in filters]
        else:
            # If 'groupIDs' & 'filterIDs' params not present in request,
            # use all user groups
            group_ids = user_accessible_group_ids
            filter_ids = user_accessible_filter_ids

    try:
        page = int(page_number)
    except ValueError:
        return self.error("Invalid page number value.")
    try:
        n_per_page = int(n_per_page)
    except ValueError:
        return self.error("Invalid numPerPage value.")
    n_per_page = min(n_per_page, 500)

    initial_candidate_filter_criteria = [Candidate.filter_id.in_(filter_ids)]
    if start_date is not None and start_date.strip() not in [
        "",
        "null",
        "undefined",
    ]:
        start_date = arrow.get(start_date).datetime
        initial_candidate_filter_criteria.append(Candidate.passed_at >= start_date)
    if end_date is not None and end_date.strip() not in ["", "null", "undefined"]:
        end_date = arrow.get(end_date).datetime
        initial_candidate_filter_criteria.append(Candidate.passed_at <= end_date)
    candidate_subquery = (
        Candidate.query_records_accessible_by(self.current_user)
        .filter(*initial_candidate_filter_criteria)
        .subquery()
    )
    # We'll join in the nested data for Obj (like photometry) later
    q = Obj.query_records_accessible_by(self.current_user).join(
        candidate_subquery, Obj.id == candidate_subquery.c.obj_id
    )
    if sort_by_origin is not None or annotation_filter_list is not None:
        q = q.outerjoin(Annotation)

    if classifications is not None:
        if isinstance(classifications, str) and "," in classifications:
            classifications = [c.strip() for c in classifications.split(",")]
        elif isinstance(classifications, str):
            classifications = [classifications]
        else:
            return self.error(
                "Invalid classifications value -- must provide at least one string value"
            )
        q = q.join(Classification).filter(
            Classification.classification.in_(classifications)
        )

    if sort_by_origin is None:
        # Don't apply the order by just yet. Save it so we can pass it to
        # the LIMIT/OFFSET helper function down the line once other query
        # params are set.
        order_by = [candidate_subquery.c.passed_at.desc().nullslast(), Obj.id]

    if saved_status in [
        "savedToAllSelected",
        "savedToAnySelected",
        "savedToAnyAccessible",
        "notSavedToAnyAccessible",
        "notSavedToAnySelected",
        "notSavedToAllSelected",
    ]:
        notin = False
        active_sources = Source.query_records_accessible_by(
            self.current_user, columns=[Source.obj_id]
        ).filter(Source.active.is_(True))
        if saved_status == "savedToAllSelected":
            # Retrieve objects that have as many active saved groups that are
            # in 'group_ids' as there are items in 'group_ids'
            subquery = (
                active_sources.filter(Source.group_id.in_(group_ids))
                .group_by(Source.obj_id)
                .having(func.count(Source.group_id) == len(group_ids))
            )
        elif saved_status == "savedToAnySelected":
            subquery = active_sources.filter(Source.group_id.in_(group_ids))
        elif saved_status == "savedToAnyAccessible":
            subquery = active_sources.filter(
                Source.group_id.in_(user_accessible_group_ids)
            )
        elif saved_status == "notSavedToAnyAccessible":
            subquery = active_sources.filter(
                Source.group_id.in_(user_accessible_group_ids)
            )
            notin = True
        elif saved_status == "notSavedToAnySelected":
            subquery = active_sources.filter(Source.group_id.in_(group_ids))
            notin = True
        elif saved_status == "notSavedToAllSelected":
            # Retrieve objects that have as many active saved groups that are
            # in 'group_ids' as there are items in 'group_ids', and select
            # the objects not in that set
            subquery = (
                active_sources.filter(Source.group_id.in_(group_ids))
                .group_by(Source.obj_id)
                .having(func.count(Source.group_id) == len(group_ids))
            )
            notin = True
        q = (
            q.filter(Obj.id.notin_(subquery))
            if notin
            else q.filter(Obj.id.in_(subquery))
        )
    elif saved_status != "all":
        return self.error(
            f"Invalid savedStatus: {saved_status}. Must be one of the enumerated options."
        )

    if min_redshift is not None:
        try:
            min_redshift = float(min_redshift)
        except ValueError:
            return self.error(
                "Invalid values for minRedshift - could not convert to float"
            )
        q = q.filter(Obj.redshift >= min_redshift)
    if max_redshift is not None:
        try:
            max_redshift = float(max_redshift)
        except ValueError:
            return self.error(
                "Invalid values for maxRedshift - could not convert to float"
            )
        q = q.filter(Obj.redshift <= max_redshift)

    if annotation_exclude_origin is not None:
        if annotation_exclude_date is None:
            right = (
                Obj.query_records_accessible_by(self.current_user, columns=[Obj.id])
                .join(Annotation)
                .filter(Annotation.origin == annotation_exclude_origin)
                .subquery()
            )
        else:
            expire_date = arrow.get(annotation_exclude_date).datetime
            right = (
                Obj.query_records_accessible_by(self.current_user, columns=[Obj.id])
                .join(Annotation)
                .filter(
                    Annotation.origin == annotation_exclude_origin,
                    Annotation.modified >= expire_date,
                )
                .subquery()
            )
        q = q.outerjoin(right, Obj.id == right.c.id).filter(right.c.id.is_(None))

    if list_name is not None:
        q = q.filter(
            Listing.list_name == list_name,
            Listing.user_id == self.associated_user_object.id,
        )
    if list_name_reject is not None:
        right = (
            Obj.query_records_accessible_by(self.current_user, columns=[Obj.id])
            .join(Listing)
            .filter(
                Listing.list_name == list_name_reject,
                Listing.user_id == self.associated_user_object.id,
            )
            .subquery()
        )
        q = q.outerjoin(right, Obj.id == right.c.id).filter(right.c.id.is_(None))

    if annotation_filter_list is not None:
        # Parse annotation filter list objects from the query string
        # and apply the filters to the query
        for item in re.split(r",(?={)", annotation_filter_list):
            try:
                new_filter = json.loads(item)
            except json.decoder.JSONDecodeError:
                return self.error(
                    "Could not parse JSON objects for annotation filtering"
                )

            if "origin" not in new_filter:
                return self.error(
                    f"Invalid annotation filter list item {item}: \"origin\" is required."
                )
            if "key" not in new_filter:
                return self.error(
                    f"Invalid annotation filter list item {item}: \"key\" is required."
                )

            if "value" in new_filter:
                value = new_filter["value"]
                if isinstance(value, bool):
                    q = q.filter(
                        Annotation.origin == new_filter["origin"],
                        Annotation.data[new_filter["key"]].astext.cast(Boolean)
                        == value,
                    )
                else:
                    # Test if the value is a nested object
                    try:
                        value = json.loads(value)
                        # If a nested object, we put the value through the
                        # JSON loads/dumps pipeline to get a string formatted
                        # like Postgres will for its JSONB ->> text operation.
                        # For some reason, for example, not doing this will
                        # have value = { "key": "value" } (with the extra
                        # spaces around the braces) and cause the filter to
                        # fail.
                        value = json.dumps(value)
                    except json.decoder.JSONDecodeError:
                        # If not, this is just a string field and we don't
                        # need the string formatting above
                        pass
                    q = q.filter(
                        Annotation.origin == new_filter["origin"],
                        Annotation.data[new_filter["key"]].astext == value,
                    )
            elif "min" in new_filter and "max" in new_filter:
                try:
                    min_value = float(new_filter["min"])
                    max_value = float(new_filter["max"])
                    q = q.filter(
                        Annotation.origin == new_filter["origin"],
                        Annotation.data[new_filter["key"]].cast(Float) >= min_value,
                        Annotation.data[new_filter["key"]].cast(Float) <= max_value,
                    )
                except ValueError:
                    return self.error(
                        f"Invalid annotation filter list item: {item}. "
                        "The min/max provided is not a valid number."
                    )
            else:
                return self.error(
                    f"Invalid annotation filter list item: {item}. "
                    "Should have either \"value\" or \"min\" and \"max\""
                )

    if sort_by_origin is not None:
        sort_by_key = self.get_query_argument("sortByAnnotationKey", None)
        sort_by_order = self.get_query_argument("sortByAnnotationOrder", None)
        # Define a custom sort order to have annotations from the correct
        # origin first, all others afterwards
        origin_sort_order = case(
            value=Annotation.origin,
            whens={sort_by_origin: 1},
            else_=None,
        )
        annotation_sort_criterion = (
            Annotation.data[sort_by_key].desc().nullslast()
            if sort_by_order == "desc"
            else Annotation.data[sort_by_key].nullslast()
        )
        # Don't apply the order by just yet. Save it so we can pass it to
        # the LIMIT/OFFSET helper function.
        order_by = [
            origin_sort_order.nullslast(),
            annotation_sort_criterion,
            candidate_subquery.c.passed_at.desc().nullslast(),
            Obj.id,
        ]

    try:
        query_results = grab_query_results(
            q,
            total_matches,
            page,
            n_per_page,
            "candidates",
            order_by=order_by,
            query_id=query_id,
            use_cache=True,
            include_detection_stats=True,
        )
    except ValueError as e:
        if "Page number out of range" in str(e):
            return self.error("Page number out of range.")
        raise

    matching_source_ids = (
        Source.query_records_accessible_by(
            self.current_user, columns=[Source.obj_id]
        )
        .filter(
            Source.obj_id.in_([obj.id for obj, in query_results["candidates"]])
        )
        .all()
    )
    candidate_list = []
    for (obj,) in query_results["candidates"]:
        with DBSession().no_autoflush:
            obj.is_source = (obj.id,) in matching_source_ids
            if obj.is_source:
                source_subquery = (
                    Source.query_records_accessible_by(self.current_user)
                    .filter(Source.obj_id == obj.id)
                    .filter(Source.active.is_(True))
                    .subquery()
                )
                obj.saved_groups = (
                    Group.query_records_accessible_by(self.current_user)
                    .join(source_subquery, Group.id == source_subquery.c.group_id)
                    .all()
                )
                obj.classifications = (
                    Classification.query_records_accessible_by(self.current_user)
                    .filter(Classification.obj_id == obj.id)
                    .all()
                )
            obj.passing_group_ids = [
                f.group_id
                for f in (
                    Filter.query_records_accessible_by(self.current_user).filter(
                        Filter.id.in_(
                            Candidate.query_records_accessible_by(
                                self.current_user, columns=[Candidate.filter_id]
                            ).filter(Candidate.obj_id == obj.id)
                        )
                    )
                )
            ]
            candidate_list.append(recursive_to_dict(obj))
            if include_photometry:
                candidate_list[-1]["photometry"] = (
                    Photometry.query_records_accessible_by(
                        self.current_user,
                        mode='read',
                        options=[joinedload(Photometry.instrument)],
                    )
                    .filter(Photometry.obj_id == obj.id)
                    .all()
                )
            if include_spectra:
                candidate_list[-1]["spectra"] = (
                    Spectrum.query_records_accessible_by(
                        self.current_user,
                        mode='read',
                        options=[joinedload(Spectrum.instrument)],
                    )
                    .filter(Spectrum.obj_id == obj.id)
                    .all()
                )
            if include_comments:
                candidate_list[-1]["comments"] = sorted(
                    Comment.query_records_accessible_by(self.current_user)
                    .filter(Comment.obj_id == obj.id)
                    .all(),
                    key=lambda x: x.created_at,
                    reverse=True,
                )
            unordered_annotations = sorted(
                Annotation.query_records_accessible_by(self.current_user)
                .filter(Annotation.obj_id == obj.id)
                .all(),
                key=lambda x: x.origin,
            )
            selected_groups_annotations = []
            other_annotations = []
            for annotation in unordered_annotations:
                if set(group_ids).intersection(
                    {group.id for group in annotation.groups}
                ):
                    selected_groups_annotations.append(annotation)
                else:
                    other_annotations.append(annotation)
            candidate_list[-1]["annotations"] = (
                selected_groups_annotations + other_annotations
            )
            if len(obj.photstats) > 0:
                candidate_list[-1]["last_detected_at"] = Time(
                    obj.photstats[-1].last_detected_mjd, format='mjd'
                ).datetime
            else:
                candidate_list[-1]["last_detected_at"] = None
            candidate_list[-1]["gal_lat"] = obj.gal_lat_deg
            candidate_list[-1]["gal_lon"] = obj.gal_lon_deg
            candidate_list[-1]["luminosity_distance"] = obj.luminosity_distance
            candidate_list[-1]["dm"] = obj.dm
            candidate_list[-1][
                "angular_diameter_distance"
            ] = obj.angular_diameter_distance

    query_results["candidates"] = candidate_list
    query_results = recursive_to_dict(query_results)
    self.verify_and_commit()
    return self.success(data=query_results)
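# A hedged sketch of paginating the multi-candidate branch above from a client,
# assuming the handler is routed at "/api/candidates" (SkyPortal's convention);
# the base URL, token, and group IDs are placeholders.
import requests

BASE_URL = "https://skyportal.example.com"  # hypothetical instance
HEADERS = {"Authorization": "token abc123"}  # placeholder API token

params = {
    "groupIDs": "1,2",                    # comma-separated, as the handler parses
    "savedStatus": "savedToAnySelected",  # one of the enumerated options
    "startDate": "2020-01-01",            # filters Candidate.passed_at >= startDate
    "numPerPage": 100,                    # capped server-side at 500
    "pageNumber": 1,
}
r = requests.get(f"{BASE_URL}/api/candidates", headers=HEADERS, params=params)
r.raise_for_status()
data = r.json()["data"]
print(data["totalMatches"], [c["id"] for c in data["candidates"]])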
def get(self, run_id=None):
    """
    ---
    single:
      description: Retrieve an observing run
      tags:
        - observing_runs
      parameters:
        - in: path
          name: run_id
          required: true
          schema:
            type: integer
      responses:
        200:
          content:
            application/json:
              schema: SingleObservingRunGetWithAssignments
        400:
          content:
            application/json:
              schema: Error
    multiple:
      description: Retrieve all observing runs
      tags:
        - observing_runs
      responses:
        200:
          content:
            application/json:
              schema: ArrayOfObservingRuns
        400:
          content:
            application/json:
              schema: Error
    """
    if run_id is not None:
        # These are all read=public, including Objs
        options = [
            joinedload(ObservingRun.assignments)
            .joinedload(ClassicalAssignment.obj)
            .joinedload(Obj.thumbnails),
            joinedload(ObservingRun.assignments).joinedload(
                ClassicalAssignment.requester
            ),
            joinedload(ObservingRun.instrument).joinedload(Instrument.telescope),
        ]

        run = ObservingRun.get_if_accessible_by(
            run_id,
            self.current_user,
            mode="read",
            raise_if_none=True,
            options=options,
        )

        # order the assignments by ra
        assignments = sorted(run.assignments, key=lambda a: a.obj.ra)

        data = ObservingRunGetWithAssignments.dump(run)
        data["assignments"] = [a.to_dict() for a in assignments]

        for a in data["assignments"]:
            a['accessible_group_names'] = [
                (s.group.nickname if s.group.nickname is not None else s.group.name)
                for s in Source.query_records_accessible_by(
                    self.current_user, mode="read"
                )
                .filter(Source.obj_id == a["obj"].id)
                .all()
            ]
            del a['obj'].sources
            del a['obj'].users

        # vectorized calculation of ephemerides
        if len(data["assignments"]) > 0:
            targets = [a['obj'].target for a in data["assignments"]]

            rise_times = run.rise_time(targets).isot
            set_times = run.set_time(targets).isot

            for d, rt, st in zip(data["assignments"], rise_times, set_times):
                d["rise_time_utc"] = rt if rt is not np.ma.masked else ''
                d["set_time_utc"] = st if st is not np.ma.masked else ''

        data = recursive_to_dict(data)
        self.verify_and_commit()
        return self.success(data=data)

    runs = (
        ObservingRun.query_records_accessible_by(self.current_user, mode="read")
        .order_by(ObservingRun.calendar_date.asc())
        .all()
    )
    runs_list = []
    for run in runs:
        runs_list.append(run.to_dict())
        runs_list[-1]["run_end_utc"] = run.instrument.telescope.next_sunrise(
            run.calendar_noon
        ).isot

    self.verify_and_commit()
    return self.success(data=runs_list)
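# Why the `is not np.ma.masked` guard above matters: the vectorized ephemeris
# calls can return masked entries (e.g. for targets that never rise or never
# set during the run), and those should serialize as empty strings rather than
# a masked constant. A minimal illustration of the same pattern, with made-up
# timestamps:
import numpy as np

isot_times = np.ma.array(
    ["2022-01-01T02:30:00.000", "1858-11-17T00:00:00.000"],
    mask=[False, True],  # second entry stands in for a target with no rise time
)
cleaned = [t if t is not np.ma.masked else '' for t in isot_times]
print(cleaned)  # ['2022-01-01T02:30:00.000', '']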
def get(self, analysis_resource_type, analysis_id=None):
    """
    ---
    single:
      description: Retrieve an Analysis by id
      tags:
        - analysis
      parameters:
        - in: path
          name: analysis_resource_type
          required: true
          schema:
            type: string
          description: |
            What underlying data the analysis is on:
            must be "obj" (more to be added in the future)
        - in: path
          name: analysis_id
          required: true
          schema:
            type: integer
        - in: query
          name: includeAnalysisData
          nullable: true
          schema:
            type: boolean
          description: |
            Boolean indicating whether to include the data associated
            with the analysis in the response. Could be a large
            amount of data. Only works for single analysis requests.
            Defaults to false.
        - in: query
          name: includeFilename
          nullable: true
          schema:
            type: boolean
          description: |
            Boolean indicating whether to include the filename of the
            data associated with the analysis in the response. Defaults to false.
      responses:
        200:
          content:
            application/json:
              schema: SingleObjAnalysis
        400:
          content:
            application/json:
              schema: Error
    multiple:
      description: Retrieve all Analyses
      tags:
        - analysis
      responses:
        200:
          content:
            application/json:
              schema: ArrayOfObjAnalysiss
        400:
          content:
            application/json:
              schema: Error
    """
    include_analysis_data = self.get_query_argument(
        "includeAnalysisData", False
    ) in ["True", "t", "true", "1", True, 1]
    include_filename = self.get_query_argument("includeFilename", False) in [
        "True",
        "t",
        "true",
        "1",
        True,
        1,
    ]

    if analysis_resource_type.lower() == 'obj':
        if analysis_id is not None:
            try:
                s = ObjAnalysis.get_if_accessible_by(
                    analysis_id, self.current_user, raise_if_none=True
                )
            except AccessError:
                return self.error('Cannot access this Analysis.', status=403)

            analysis_dict = recursive_to_dict(s)
            if include_filename:
                analysis_dict["filename"] = s._full_name
            analysis_dict["groups"] = s.groups
            if include_analysis_data:
                analysis_dict["data"] = s.data

            return self.success(data=analysis_dict)

        # retrieve multiple analyses
        analyses = ObjAnalysis.get_records_accessible_by(self.current_user)
        self.verify_and_commit()

        ret_array = []
        for a in analyses:
            analysis_dict = recursive_to_dict(a)
            analysis_dict["groups"] = a.groups
            if include_filename:
                analysis_dict["filename"] = a._full_name
            ret_array.append(analysis_dict)
    else:
        return self.error(
            f'analysis_resource_type must be one of {", ".join(["obj"])}',
            status=404,
        )

    return self.success(data=ret_array)
def get(self, analysis_service_id=None):
    """
    ---
    single:
      description: Retrieve an Analysis Service by id
      tags:
        - analysis_services
      parameters:
        - in: path
          name: analysis_service_id
          required: true
          schema:
            type: integer
      responses:
        200:
          content:
            application/json:
              schema: SingleAnalysisService
        400:
          content:
            application/json:
              schema: Error
    multiple:
      description: Retrieve all Analysis Services
      tags:
        - analysis_services
      responses:
        200:
          content:
            application/json:
              schema: ArrayOfAnalysisServices
        400:
          content:
            application/json:
              schema: Error
    """
    if analysis_service_id is not None:
        try:
            s = AnalysisService.get_if_accessible_by(
                analysis_service_id, self.current_user, raise_if_none=True
            )
        except AccessError:
            return self.error('Cannot access this Analysis Service.', status=403)

        analysis_dict = recursive_to_dict(s)
        analysis_dict["groups"] = s.groups
        return self.success(data=analysis_dict)

    # retrieve multiple services
    analysis_services = AnalysisService.get_records_accessible_by(
        self.current_user
    )
    self.verify_and_commit()

    ret_array = []
    for a in analysis_services:
        analysis_dict = recursive_to_dict(a)
        analysis_dict["groups"] = a.groups
        ret_array.append(analysis_dict)

    return self.success(data=ret_array)
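# A rough client sketch for the two analysis handlers above. The routes here
# ("/api/analysis_service" and "/api/obj/analysis") are assumptions inferred
# from the resource names, as are the base URL, token, and the analysis ID.
import requests

BASE_URL = "https://skyportal.example.com"  # hypothetical instance
HEADERS = {"Authorization": "token abc123"}  # placeholder API token

# List every analysis service visible to the requesting user.
services = requests.get(
    f"{BASE_URL}/api/analysis_service", headers=HEADERS
).json()["data"]

# Fetch a single obj analysis, including its (possibly large) data payload.
analysis = requests.get(
    f"{BASE_URL}/api/obj/analysis/17",  # "17" is a made-up analysis ID
    headers=HEADERS,
    params={"includeAnalysisData": "true", "includeFilename": "true"},
).json()["data"]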
def get(self, obj_id):
    """
    ---
    description: Retrieve all spectra associated with an Object
    tags:
      - spectra
    parameters:
      - in: path
        name: obj_id
        required: true
        schema:
          type: string
        description: ID of the object to retrieve spectra for
      - in: query
        name: normalization
        required: false
        schema:
          type: string
        description: |
          what normalization is needed for the spectra (e.g., "median").
          If omitted, returns the original spectrum.
          Options for normalization are:
          - median: normalize the flux to have median==1
    responses:
      200:
        content:
          application/json:
            schema:
              allOf:
                - $ref: '#/components/schemas/Success'
                - type: object
                  properties:
                    data:
                      type: object
                      properties:
                        obj_id:
                          type: string
                          description: The ID of the requested Obj
                        spectra:
                          type: array
                          items:
                            $ref: '#/components/schemas/Spectrum'
      400:
        content:
          application/json:
            schema: Error
    """
    obj = Obj.get_if_accessible_by(obj_id, self.current_user)
    if obj is None:
        return self.error('Invalid object ID.')

    spectra = (
        Spectrum.query_records_accessible_by(self.current_user)
        .filter(Spectrum.obj_id == obj_id)
        .all()
    )

    return_values = []
    for spec in spectra:
        spec_dict = recursive_to_dict(spec)
        spec_dict["instrument_name"] = spec.instrument.name
        spec_dict["groups"] = spec.groups
        spec_dict["reducers"] = spec.reducers
        spec_dict["observers"] = spec.observers
        spec_dict["owner"] = spec.owner
        return_values.append(spec_dict)

    normalization = self.get_query_argument('normalization', None)

    if normalization is not None:
        if normalization == "median":
            for s in return_values:
                norm = np.median(np.abs(s["fluxes"]))
                norm = norm if norm != 0.0 else 1e-20
                if not (np.isfinite(norm) and norm > 0):
                    # otherwise normalize the value at the median wavelength to 1
                    median_wave_index = np.argmin(
                        np.abs(s["wavelengths"] - np.median(s["wavelengths"]))
                    )
                    norm = s["fluxes"][median_wave_index]

                s["fluxes"] = s["fluxes"] / norm
        else:
            return self.error(
                f'Invalid "normalization" value "{normalization}", use '
                '"median" or None'
            )

    self.verify_and_commit()
    return self.success(data={'obj_id': obj.id, 'spectra': return_values})
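# A standalone restatement of the median normalization implemented above, so
# the two branches are easier to see (the wavelength/flux values are made up):
import numpy as np

wavelengths = np.linspace(4000.0, 9000.0, 5)  # Angstrom
fluxes = np.array([1.2e-16, 3.4e-16, 2.2e-16, 0.0, 1.1e-16])

norm = np.median(np.abs(fluxes))
norm = norm if norm != 0.0 else 1e-20  # guard against an all-zero spectrum
if not (np.isfinite(norm) and norm > 0):
    # fall back to the flux at the median wavelength
    median_wave_index = np.argmin(np.abs(wavelengths - np.median(wavelengths)))
    norm = fluxes[median_wave_index]

normalized_fluxes = fluxes / norm  # median |flux| is now ~1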
def post(self, spectrum_id):
    """
    ---
    description: Submit a (classification) spectrum to TNS
    tags:
      - spectra
    parameters:
      - in: path
        name: spectrum_id
        required: true
        schema:
          type: integer
      - in: query
        name: tnsrobotID
        schema:
          type: integer
        required: true
        description: |
          SkyPortal TNS Robot ID
      - in: query
        name: classificationID
        schema:
          type: string
        description: |
          Classification ID (see TNS documentation at
          https://www.wis-tns.org/content/tns-getting-started
          for options)
      - in: query
        name: classifiers
        schema:
          type: string
        description: |
          List of those performing classification.
      - in: query
        name: spectrumType
        schema:
          type: string
        description: |
          Type of spectrum that this is. Valid options are:
          ['object', 'host', 'sky', 'arcs', 'synthetic']
      - in: query
        name: spectrumComment
        schema:
          type: string
        description: |
          Comment on the spectrum.
      - in: query
        name: classificationComment
        schema:
          type: string
        description: |
          Comment on the classification.
    responses:
      200:
        content:
          application/json:
            schema: SingleSpectrum
      400:
        content:
          application/json:
            schema: Error
    """
    data = self.get_json()
    tnsrobotID = data.get('tnsrobotID')
    classificationID = data.get('classificationID', None)
    classifiers = data.get('classifiers', '')
    spectrum_type = data.get('spectrumType', '')
    spectrum_comment = data.get('spectrumComment', '')
    classification_comment = data.get('classificationComment', '')

    if tnsrobotID is None:
        return self.error('tnsrobotID is required')

    tnsrobot = DBSession().query(TNSRobot).filter(TNSRobot.id == tnsrobotID).one()

    altdata = tnsrobot.altdata
    if not altdata:
        raise ValueError('Missing TNS information.')

    spectrum = Spectrum.get_if_accessible_by(
        spectrum_id,
        self.current_user,
    )
    if spectrum is None:
        return self.error(f'No spectrum with ID {spectrum_id}')

    spec_dict = recursive_to_dict(spectrum)
    spec_dict["instrument_name"] = spectrum.instrument.name
    spec_dict["groups"] = spectrum.groups
    spec_dict["reducers"] = spectrum.reducers
    spec_dict["observers"] = spectrum.observers
    spec_dict["owner"] = spectrum.owner

    external_reducer = (
        DBSession()
        .query(SpectrumReducer.external_reducer)
        .filter(SpectrumReducer.spectr_id == spectrum_id)
        .first()
    )
    if external_reducer is not None:
        spec_dict["external_reducer"] = external_reducer[0]

    external_observer = (
        DBSession()
        .query(SpectrumObserver.external_observer)
        .filter(SpectrumObserver.spectr_id == spectrum_id)
        .first()
    )
    if external_observer is not None:
        spec_dict["external_observer"] = external_observer[0]

    # Braces in the TNS marker are literal JSON braces, so they must be
    # escaped (doubled) inside the f-string.
    tns_headers = {
        'User-Agent': f'tns_marker{{"tns_id":{tnsrobot.bot_id},"type":"bot", "name":"{tnsrobot.bot_name}"}}'
    }

    tns_prefix, tns_name = get_IAUname(
        spectrum.obj.id, altdata['api_key'], tns_headers
    )
    if tns_name is None:
        return self.error('TNS name missing... please first post to TNS.')

    # Default to None so the report simply omits the redshift when the
    # object does not have one.
    redshift = spectrum.obj.redshift if spectrum.obj.redshift else None

    spectype_id = ['object', 'host', 'sky', 'arcs', 'synthetic'].index(
        spectrum_type
    ) + 1

    if spec_dict["altdata"] is not None:
        header = spec_dict["altdata"]
        exposure_time = header['EXPTIME']
    else:
        exposure_time = None

    wav = spec_dict['wavelengths']
    flux = spec_dict['fluxes']
    err = spec_dict['errors']

    filename = f'{spectrum.instrument.name}.{spectrum_id}'
    filetype = 'ascii'

    with tempfile.NamedTemporaryFile(
        prefix=filename,
        suffix=f'.{filetype}',
        mode='w',
    ) as f:
        if err is not None:
            for i in range(len(wav)):
                f.write(f'{wav[i]} \t {flux[i]} \t {err[i]} \n')
        else:
            for i in range(len(wav)):
                f.write(f'{wav[i]} \t {flux[i]}\n')
        f.flush()

        data = {'api_key': altdata['api_key']}

        if filetype == 'ascii':
            files = [('files[]', (filename, open(f.name), 'text/plain'))]
        elif filetype == 'fits':
            files = [
                ('files[0]', (filename, open(f.name, 'rb'), 'application/fits'))
            ]

        r = requests.post(upload_url, headers=tns_headers, data=data, files=files)
        if r.status_code != 200:
            return self.error(f'{r.content}')

    spectrumdict = {
        'instrumentid': spectrum.instrument.tns_id,
        'observer': spec_dict["observers"],
        'reducer': spec_dict["reducers"],
        'spectypeid': spectype_id,
        'ascii_file': filename,
        'fits_file': '',
        'remarks': spectrum_comment,
        'spec_proprietary_period': 0.0,
        'obsdate': spec_dict['observed_at'],
    }
    if exposure_time is not None:
        spectrumdict['exptime'] = exposure_time

    classification_report = {
        'name': tns_name,
        'classifier': classifiers,
        'objtypeid': classificationID,
        'groupid': tnsrobot.source_group_id,
        'remarks': classification_comment,
        'spectra': {'spectra-group': {'0': spectrumdict}},
    }
    if redshift is not None:
        classification_report['redshift'] = redshift

    classificationdict = {'classification_report': {'0': classification_report}}

    data = {
        'api_key': altdata['api_key'],
        'data': json.dumps(classificationdict),
    }

    r = requests.post(report_url, headers=tns_headers, data=data)
    if r.status_code == 200:
        tns_id = r.json()['data']['report_id']
        return self.success(data={'tns_id': tns_id})
    else:
        return self.error(f'{r.content}')
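# The TNS "marker" User-Agent is a literal JSON-like string, which is why the
# braces in the f-string above must be doubled to escape them. A quick check
# with made-up bot credentials shows what the corrected expression produces:
bot_id = 99          # made-up TNS bot ID
bot_name = "my_bot"  # made-up TNS bot name

user_agent = f'tns_marker{{"tns_id":{bot_id},"type":"bot", "name":"{bot_name}"}}'
print(user_agent)  # tns_marker{"tns_id":99,"type":"bot", "name":"my_bot"}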
def get(self, obj_id):
    """
    ---
    description: Retrieve all spectra associated with an Object
    tags:
      - spectra
    parameters:
      - in: path
        name: obj_id
        required: true
        schema:
          type: string
        description: ID of the object to retrieve spectra for
      - in: query
        name: normalization
        required: false
        schema:
          type: string
        description: |
          what normalization is needed for the spectra (e.g., "median").
          If omitted, returns the original spectrum.
          Options for normalization are:
          - median: normalize the flux to have median==1
    responses:
      200:
        content:
          application/json:
            schema:
              allOf:
                - $ref: '#/components/schemas/Success'
                - type: object
                  properties:
                    data:
                      type: object
                      properties:
                        obj_id:
                          type: string
                          description: The ID of the requested Obj
                        spectra:
                          type: array
                          items:
                            $ref: '#/components/schemas/Spectrum'
      400:
        content:
          application/json:
            schema: Error
    """
    obj = Obj.get_if_accessible_by(obj_id, self.current_user)
    if obj is None:
        return self.error('Invalid object ID.')

    spectra = (
        Spectrum.query_records_accessible_by(self.current_user)
        .filter(Spectrum.obj_id == obj_id)
        .all()
    )

    return_values = []
    for spec in spectra:
        spec_dict = recursive_to_dict(spec)
        comments = (
            CommentOnSpectrum.query_records_accessible_by(
                self.current_user,
                options=[joinedload(CommentOnSpectrum.groups)],
            )
            .filter(CommentOnSpectrum.spectrum_id == spec.id)
            .all()
        )
        spec_dict["comments"] = sorted(
            [
                {
                    **{
                        k: v
                        for k, v in c.to_dict().items()
                        if k != "attachment_bytes"
                    },
                    "author": {
                        **c.author.to_dict(),
                        "gravatar_url": c.author.gravatar_url,
                    },
                }
                for c in comments
            ],
            key=lambda x: x["created_at"],
            reverse=True,
        )
        spec_dict["instrument_name"] = spec.instrument.name
        spec_dict["groups"] = spec.groups
        spec_dict["reducers"] = spec.reducers
        spec_dict["observers"] = spec.observers

        external_reducer = (
            DBSession()
            .query(SpectrumReducer.external_reducer)
            .filter(SpectrumReducer.spectr_id == spec.id)
            .first()
        )
        if external_reducer is not None:
            spec_dict["external_reducer"] = external_reducer[0]

        external_observer = (
            DBSession()
            .query(SpectrumObserver.external_observer)
            .filter(SpectrumObserver.spectr_id == spec.id)
            .first()
        )
        if external_observer is not None:
            spec_dict["external_observer"] = external_observer[0]

        spec_dict["owner"] = spec.owner
        return_values.append(spec_dict)

    normalization = self.get_query_argument('normalization', None)

    if normalization is not None:
        if normalization == "median":
            for s in return_values:
                norm = np.median(np.abs(s["fluxes"]))
                norm = norm if norm != 0.0 else 1e-20
                if not (np.isfinite(norm) and norm > 0):
                    # otherwise normalize the value at the median wavelength to 1
                    median_wave_index = np.argmin(
                        np.abs(s["wavelengths"] - np.median(s["wavelengths"]))
                    )
                    norm = s["fluxes"][median_wave_index]

                s["fluxes"] = s["fluxes"] / norm
        else:
            return self.error(
                f'Invalid "normalization" value "{normalization}", use '
                '"median" or None'
            )

    self.verify_and_commit()
    return self.success(data={'obj_id': obj.id, 'spectra': return_values})
def get(self, spectrum_id=None):
    """
    ---
    single:
      description: Retrieve a spectrum
      tags:
        - spectra
      parameters:
        - in: path
          name: spectrum_id
          required: true
          schema:
            type: integer
      responses:
        200:
          content:
            application/json:
              schema: SingleSpectrum
        403:
          content:
            application/json:
              schema: Error
    multiple:
      description: Retrieve multiple spectra with given criteria
      tags:
        - spectra
      parameters:
        - in: query
          name: minimalPayload
          nullable: true
          default: false
          schema:
            type: boolean
          description: |
            If true, return only the minimal metadata
            about each spectrum, instead of returning
            the potentially large payload that includes
            wavelength/flux and also comments and annotations.
            The metadata that is always included is:
            id, obj_id, owner_id, origin, type, label,
            observed_at, created_at, modified,
            instrument_id, instrument_name, original_file_filename,
            followup_request_id, assignment_id, and altdata.
        - in: query
          name: observedBefore
          nullable: true
          schema:
            type: string
          description: |
            Arrow-parseable date string (e.g. 2020-01-01). If provided,
            return only spectra observed before this time.
        - in: query
          name: observedAfter
          nullable: true
          schema:
            type: string
          description: |
            Arrow-parseable date string (e.g. 2020-01-01). If provided,
            return only spectra observed after this time.
        - in: query
          name: objID
          nullable: true
          schema:
            type: string
          description: |
            Return any spectra on an object with ID that has a (partial) match
            to this argument (i.e., the given argument is "in" the object's ID).
        - in: query
          name: instrumentIDs
          nullable: true
          schema:
            type: array
            items:
              type: integer
          description: |
            If provided, filter only spectra observed with one of these
            instrument IDs.
        - in: query
          name: groupIDs
          nullable: true
          schema:
            type: array
            items:
              type: integer
          description: |
            If provided, filter only spectra saved to one of these group IDs.
        - in: query
          name: followupRequestIDs
          nullable: true
          schema:
            type: array
            items:
              type: integer
          description: |
            If provided, filter only spectra associated with these
            followup request IDs.
        - in: query
          name: assignmentIDs
          nullable: true
          schema:
            type: array
            items:
              type: integer
          description: |
            If provided, filter only spectra associated with these
            assignment request IDs.
        - in: query
          name: origin
          nullable: true
          schema:
            type: string
          description: |
            Return any spectra that have an origin with a (partial) match
            to any of the values in this comma separated list.
        - in: query
          name: label
          nullable: true
          schema:
            type: string
          description: |
            Return any spectra that have a label with a (partial) match
            to any of the values in this comma separated list.
        - in: query
          name: type
          nullable: true
          schema:
            type: string
          description: |
            Return spectra of the given type or types
            (match multiple values using a comma separated list).
            Types of spectra are defined in the config,
            e.g., source, host or host_center.
        - in: query
          name: commentsFilter
          nullable: true
          schema:
            type: array
            items:
              type: string
          explode: false
          style: simple
          description: |
            Comma-separated string of comment text to filter for spectra matching.
        - in: query
          name: commentsFilterAuthor
          nullable: true
          schema:
            type: string
          description: |
            Comma separated string of authors.
            Only comments from these authors are used
            when filtering with the commentsFilter.
        - in: query
          name: commentsFilterBefore
          nullable: true
          schema:
            type: string
          description: |
            Arrow-parseable date string (e.g. 2020-01-01).
            If provided, only return sources that have comments before this time.
        - in: query
          name: commentsFilterAfter
          nullable: true
          schema:
            type: string
          description: |
            Arrow-parseable date string (e.g. 2020-01-01).
            If provided, only return sources that have comments after this time.
    """
    if spectrum_id is not None:
        try:
            spectrum = Spectrum.get_if_accessible_by(
                spectrum_id,
                self.current_user,
                raise_if_none=True,
            )
        except AccessError:
            return self.error(
                f'Could not access spectrum {spectrum_id}.', status=403
            )
        comments = (
            CommentOnSpectrum.query_records_accessible_by(
                self.current_user,
                options=[joinedload(CommentOnSpectrum.groups)],
            )
            .filter(CommentOnSpectrum.spectrum_id == spectrum_id)
            .all()
        )
        annotations = (
            AnnotationOnSpectrum.query_records_accessible_by(self.current_user)
            .filter(AnnotationOnSpectrum.spectrum_id == spectrum_id)
            .all()
        )

        spec_dict = recursive_to_dict(spectrum)
        spec_dict["instrument_name"] = spectrum.instrument.name
        spec_dict["groups"] = spectrum.groups
        spec_dict["reducers"] = spectrum.reducers
        spec_dict["observers"] = spectrum.observers
        spec_dict["owner"] = spectrum.owner
        spec_dict["comments"] = comments
        spec_dict["annotations"] = annotations

        external_reducer = (
            DBSession()
            .query(SpectrumReducer.external_reducer)
            .filter(SpectrumReducer.spectr_id == spectrum_id)
            .first()
        )
        if external_reducer is not None:
            spec_dict["external_reducer"] = external_reducer[0]

        external_observer = (
            DBSession()
            .query(SpectrumObserver.external_observer)
            .filter(SpectrumObserver.spectr_id == spectrum_id)
            .first()
        )
        if external_observer is not None:
            spec_dict["external_observer"] = external_observer[0]

        self.verify_and_commit()
        return self.success(data=spec_dict)

    # multiple spectra
    minimal_payload = self.get_query_argument('minimalPayload', False)
    observed_before = self.get_query_argument('observedBefore', None)
    observed_after = self.get_query_argument('observedAfter', None)
    obj_id = self.get_query_argument('objID', None)
    instrument_ids = self.get_query_argument('instrumentIDs', None)
    group_ids = self.get_query_argument('groupIDs', None)
    followup_ids = self.get_query_argument('followupRequestIDs', None)
    assignment_ids = self.get_query_argument('assignmentIDs', None)
    spec_origin = self.get_query_argument('origin', None)
    spec_label = self.get_query_argument('label', None)
    spec_type = self.get_query_argument('type', None)
    comments_filter = self.get_query_argument('commentsFilter', None)
    comments_filter_author = self.get_query_argument('commentsFilterAuthor', None)
    comments_filter_before = self.get_query_argument('commentsFilterBefore', None)
    comments_filter_after = self.get_query_argument('commentsFilterAfter', None)

    # validate inputs
    try:
        observed_before = (
            arrow.get(observed_before).datetime if observed_before else None
        )
    except (TypeError, ParserError):
        return self.error(f'Cannot parse time input value "{observed_before}".')

    try:
        observed_after = (
            arrow.get(observed_after).datetime if observed_after else None
        )
    except (TypeError, ParserError):
        return self.error(f'Cannot parse time input value "{observed_after}".')

    try:
        instrument_ids = self.parse_id_list(instrument_ids, Instrument)
        group_ids = self.parse_id_list(group_ids, Group)
        followup_ids = self.parse_id_list(followup_ids, FollowupRequest)
        assignment_ids = self.parse_id_list(assignment_ids, ClassicalAssignment)
    except (ValueError, AccessError) as e:
        return self.error(str(e))

    if obj_id is not None:
        try:
            Obj.get_if_accessible_by(obj_id, self.current_user)
        except AccessError:
            return self.error(f'Cannot find object with ID "{obj_id}"')

    if spec_origin is not None:
        try:
            spec_origin = self.parse_string_list(spec_origin)
        except TypeError:
            return self.error(f'Cannot parse "origin" argument "{spec_origin}".')

    if spec_label is not None:
        try:
            spec_label = self.parse_string_list(spec_label)
        except TypeError:
            return self.error(f'Cannot parse "label" argument "{spec_label}".')

    if spec_type is not None:
        try:
            spec_type = self.parse_string_list(spec_type)
        except TypeError:
            return self.error(f'Cannot parse "type" argument "{spec_type}".')
        for t in spec_type:
            if t not in ALLOWED_SPECTRUM_TYPES:
                return self.error(
                    f'Spectrum type "{t}" is not in list of allowed '
                    f'spectrum types: {ALLOWED_SPECTRUM_TYPES}.'
                )

    if comments_filter is not None:
        try:
            comments_filter = self.parse_string_list(comments_filter)
        except TypeError:
            return self.error(
                f'Cannot parse "commentsFilter" argument "{comments_filter}".'
            )

    if comments_filter_author is not None:
        try:
            comments_filter_author = self.parse_string_list(comments_filter_author)
        except TypeError:
            return self.error(
                f'Cannot parse "commentsFilterAuthor" argument "{comments_filter_author}".'
            )

    if comments_filter_before is not None:
        try:
            comments_filter_before = arrow.get(comments_filter_before).datetime
        except (TypeError, ParserError):
            return self.error(
                f'Cannot parse time input value "{comments_filter_before}".'
            )

    if comments_filter_after is not None:
        try:
            comments_filter_after = arrow.get(comments_filter_after).datetime
        except (TypeError, ParserError):
            return self.error(
                f'Cannot parse time input value "{comments_filter_after}".'
            )

    # filter the spectra
    if minimal_payload:
        columns = [
            'id',
            'owner_id',
            'obj_id',
            'observed_at',
            'origin',
            'type',
            'label',
            'instrument_id',
            'followup_request_id',
            'assignment_id',
            'altdata',
            'original_file_filename',
        ]
        columns = [Column(c) for c in columns]
        spec_query = Spectrum.query_records_accessible_by(
            self.current_user,
            columns=columns,
        )
    else:
        spec_query = Spectrum.query_records_accessible_by(self.current_user)

    if instrument_ids:
        spec_query = spec_query.filter(Spectrum.instrument_id.in_(instrument_ids))
    if group_ids:
        spec_query = spec_query.filter(
            or_(*[Spectrum.groups.any(Group.id == gid) for gid in group_ids])
        )
    if followup_ids:
        spec_query = spec_query.filter(
            Spectrum.followup_request_id.in_(followup_ids)
        )
    if assignment_ids:
        spec_query = spec_query.filter(Spectrum.assignment_id.in_(assignment_ids))
    if obj_id:
        spec_query = spec_query.filter(Spectrum.obj_id.contains(obj_id.strip()))
    if observed_before:
        spec_query = spec_query.filter(Spectrum.observed_at <= observed_before)
    if observed_after:
        spec_query = spec_query.filter(Spectrum.observed_at >= observed_after)
    if spec_origin:
        spec_query = spec_query.filter(
            or_(*[Spectrum.origin.contains(value) for value in spec_origin])
        )
    if spec_label:
        spec_query = spec_query.filter(
            or_(*[Spectrum.label.contains(value) for value in spec_label])
        )
    if spec_type:
        spec_query = spec_query.filter(Spectrum.type.in_(spec_type))

    spectra = spec_query.all()
    result_spectra = recursive_to_dict(spectra)

    if (
        not minimal_payload
        or (comments_filter is not None)
        or (comments_filter_author is not None)
        or (comments_filter_before is not None)
        or (comments_filter_after is not None)
    ):
        new_result_spectra = []
        for spec_dict in result_spectra:
            comments_query = CommentOnSpectrum.query_records_accessible_by(
                self.current_user,
                options=[joinedload(CommentOnSpectrum.groups)],
            ).filter(CommentOnSpectrum.spectrum_id == spec_dict['id'])
            if not minimal_payload:  # grab these before further filtering
                spec_dict['comments'] = recursive_to_dict(comments_query.all())
            if (
                (comments_filter is not None)
                or (comments_filter_author is not None)
                or (comments_filter_before is not None)
                or (comments_filter_after is not None)
            ):
                if comments_filter_before:
                    comments_query = comments_query.filter(
                        CommentOnSpectrum.created_at <= comments_filter_before
                    )
                if comments_filter_after:
                    comments_query = comments_query.filter(
                        CommentOnSpectrum.created_at >= comments_filter_after
                    )
                comments = comments_query.all()
                if not comments:  # if nothing passed, this spectrum is rejected
                    continue

                # check the author and free text also match at least one comment
                author_check = np.zeros(len(comments), dtype=bool)
                text_check = np.zeros(len(comments), dtype=bool)
                for i, com in enumerate(comments):
                    if comments_filter_author is None or any(
                        cf in com.author.username for cf in comments_filter_author
                    ):
                        author_check[i] = True
                    if comments_filter is None or any(
                        cf in com.text for cf in comments_filter
                    ):
                        text_check[i] = True

                # none of the comments have both author and free text match
                if not np.any(author_check & text_check):
                    continue

            # only append what passed all the cuts
            new_result_spectra.append(spec_dict)

        result_spectra = new_result_spectra

    if not minimal_payload:
        # add other data to each spectrum; pair each dict with its Spectrum
        # by id, since the comment filters above may have dropped entries
        # (a plain zip over `spectra` would misalign in that case)
        spectra_by_id = {spec.id: spec for spec in spectra}
        for spec_dict in result_spectra:
            spec = spectra_by_id[spec_dict['id']]
            annotations = (
                AnnotationOnSpectrum.query_records_accessible_by(self.current_user)
                .filter(AnnotationOnSpectrum.spectrum_id == spec.id)
                .all()
            )
            spec_dict['annotations'] = recursive_to_dict(annotations)

            external_reducer = (
                DBSession()
                .query(SpectrumReducer.external_reducer)
                .filter(SpectrumReducer.spectr_id == spec.id)
                .first()
            )
            if external_reducer is not None:
                spec_dict['external_reducer'] = recursive_to_dict(
                    external_reducer[0]
                )
            spec_dict['reducers'] = recursive_to_dict(spec.reducers)

            external_observer = (
                DBSession()
                .query(SpectrumObserver.external_observer)
                .filter(SpectrumObserver.spectr_id == spec.id)
                .first()
            )
            if external_observer is not None:
                spec_dict['external_observer'] = recursive_to_dict(
                    external_observer[0]
                )
            spec_dict['observers'] = recursive_to_dict(spec.observers)

            spec_dict['instrument_name'] = spec.instrument.name
            spec_dict['groups'] = recursive_to_dict(spec.groups)
            spec_dict['owner'] = recursive_to_dict(spec.owner)

    result_spectra = sorted(result_spectra, key=lambda x: x['observed_at'])

    self.verify_and_commit()
    return self.success(data=result_spectra)
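# A hedged client sketch for the multi-spectra branch above, assuming the
# handler is routed at "/api/spectrum" (the base URL and token are
# placeholders). minimalPayload keeps the response small while the comment
# filters still apply.
import requests

BASE_URL = "https://skyportal.example.com"  # hypothetical instance
HEADERS = {"Authorization": "token abc123"}  # placeholder API token

params = {
    "minimalPayload": "true",
    "observedAfter": "2021-01-01",
    "observedBefore": "2022-01-01",
    "commentsFilter": "host",  # only spectra with a matching comment survive
}
r = requests.get(f"{BASE_URL}/api/spectrum", headers=HEADERS, params=params)
r.raise_for_status()
for spec in r.json()["data"]:
    print(spec["id"], spec["obj_id"], spec["observed_at"])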
def post(self, spectrum_id):
    """
    ---
    description: Create synthetic photometry from a spectrum
    tags:
      - spectra
    parameters:
      - in: path
        name: spectrum_id
        required: true
        schema:
          type: integer
      - in: query
        name: filters
        required: true
        schema:
          type: array
          items:
            type: string
        description: |
          List of filters (bandpass names known to sncosmo)
    responses:
      200:
        content:
          application/json:
            schema: SingleSpectrum
      400:
        content:
          application/json:
            schema: Error
    """
    data = self.get_json()
    filters = data.get('filters')

    spectrum = Spectrum.get_if_accessible_by(
        spectrum_id,
        self.current_user,
        raise_if_none=False,
    )
    if spectrum is None:
        return self.error(f'No spectrum with id {spectrum_id}')

    spec_dict = recursive_to_dict(spectrum)
    wav = spec_dict['wavelengths']
    flux = spec_dict['fluxes']
    err = spec_dict['errors']
    obstime = spec_dict['observed_at']

    try:
        spec = sncosmo.Spectrum(
            wav, flux * spectrum.astropy_units, err * spectrum.astropy_units
        )
    except TypeError:
        spec = sncosmo.Spectrum(wav, flux * spectrum.astropy_units)

    data_list = []
    for filt in filters:
        try:
            mag = spec.bandmag(filt, magsys='ab')
            magerr = 0
        except ValueError as e:
            return self.error(
                f"Unable to generate synthetic photometry for filter {filt}: {e}"
            )

        data_list.append(
            {
                'mjd': Time(obstime, format='datetime').mjd,
                'ra': spectrum.obj.ra,
                'dec': spectrum.obj.dec,
                'mag': mag,
                'magerr': magerr,
                'filter': filt,
                'limiting_mag': 25.0,
            }
        )

    if len(data_list) > 0:
        df = pd.DataFrame.from_dict(data_list)
        df['magsys'] = 'ab'
        data_out = {
            'obj_id': spectrum.obj.id,
            'instrument_id': spectrum.instrument.id,
            'group_ids': [g.id for g in self.current_user.accessible_groups],
            **df.to_dict(orient='list'),
        }
        add_external_photometry(data_out, self.associated_user_object)
        return self.success()

    return self.success()
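# A minimal standalone sketch of the synthetic-photometry step above: build an
# sncosmo Spectrum and integrate it through a bandpass, as the handler does
# with spec.bandmag(...). The flat spectrum is made up; 'bessellv' is a
# bandpass that ships with sncosmo.
import numpy as np
import sncosmo

wave = np.linspace(4000.0, 9000.0, 500)  # Angstrom
flux = np.full_like(wave, 1.0e-17)       # erg / s / cm^2 / Angstrom
spec = sncosmo.Spectrum(wave, flux)

mag = spec.bandmag('bessellv', magsys='ab')  # synthetic AB magnitude
print(f"Bessell V (AB): {mag:.2f}")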