def delete(self, model_id):
    """
    Delete an existing model and its predictions
    ---
    produces:
        - application/json
    parameters:
        - in: path
          name: model_id
          description: ID of the Model to be deleted
          required: true
          type: integer
    responses:
        200:
            description: ML Model deleted
        404:
            description: Model doesn't exist
        500:
            description: Internal Server Error
    """
    try:
        MLModelService.delete_ml_model(model_id)
        return {"success": "model deleted"}, 200
    except NotFound:
        return err(404, "model not found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
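# The handlers in this module return `err(status, message)` bodies for error responses.
# The helper itself is not shown in this section; a minimal sketch of the assumed shape
# (a plain JSON-serializable dict mirroring the HTTP status) could look like this:
def err(code, message):
    """Build a simple error body to return alongside the HTTP status code."""
    return {
        "code": code,
        "error": message
    }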
def get(self):
    """
    Get all ML models
    ---
    produces:
        - application/json
    responses:
        200:
            description: List of ML models
        400:
            description: Invalid Request
        404:
            description: No models found
        500:
            description: Internal Server Error
    """
    model_filter = request.args.get('filter', '')
    model_archived = request.args.get('archived', 'false')
    if model_archived == 'false':
        model_archived = False
    elif model_archived == 'true':
        model_archived = True
    else:
        return err(400, "archived param must be 'true' or 'false'"), 400

    try:
        ml_models = MLModelService.get_all(model_filter, model_archived)
        return ml_models, 200
    except NotFound:
        return err(404, "no models found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
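# A minimal sketch of a stricter way to read the `archived` query flag used above.
# `parse_bool_param` is a hypothetical helper, not part of the existing codebase; it
# raises ValueError on anything that is not clearly boolean-like.
def parse_bool_param(value, default=False):
    """Interpret a boolean-like query string value."""
    if value is None or value == '':
        return default
    lowered = value.strip().lower()
    if lowered in ('true', '1', 'yes'):
        return True
    if lowered in ('false', '0', 'no'):
        return False
    raise ValueError(f"expected 'true' or 'false', got '{value}'")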
def get(self, model_id):
    """
    Get model information by model ID
    ---
    produces:
        - application/json
    parameters:
        - in: path
          name: model_id
          description: ID of the Model to be fetched
          required: true
          type: integer
    responses:
        200:
            description: ML Model information
        404:
            description: Model doesn't exist
        500:
            description: Internal Server Error
    """
    try:
        ml_model_dto = MLModelService.get_ml_model_by_id(model_id)
        return ml_model_dto.to_primitive(), 200
    except NotFound:
        return err(404, "model not found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
def get(self, model_id):
    """
    Fetch all predictions for a model
    ---
    produces:
        - application/json
    parameters:
        - in: path
          name: model_id
          description: ID of the Model
          required: true
          type: integer
    responses:
        200:
            description: List of all predictions for the model
        404:
            description: No predictions found
        500:
            description: Internal Server Error
    """
    try:
        # check if this model exists
        ml_model_dto = MLModelService.get_ml_model_by_id(model_id)

        predictions = PredictionService.get_all_by_model(ml_model_dto.model_id)
        return predictions, 200
    except NotFound:
        return err(404, "model not found"), 404
    except PredictionsNotFound:
        return err(404, "Predictions not found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
def post(self, model_id):
    """
    Store predictions for an ML Model
    ---
    produces:
        - application/json
    parameters:
        - in: body
          name: body
          required: true
          type: string
          description: JSON object of predictions
          schema:
            properties:
                modelId:
                    type: integer
                    description: ML Model ID
                    required: true
                version:
                    type: string
                    description: semver version of the Model
                    required: true
                docker_url:
                    type: string
                    description: URL to docker image
                    required: false
                bbox:
                    type: array of floats
                    description: BBOX of the predictions
                    required: true
    responses:
        200:
            description: ID of the prediction
        400:
            description: Invalid Request
        404:
            description: Model not found
        500:
            description: Internal Server Error
    """
    try:
        payload = request.get_json()

        # check if this model exists
        ml_model_dto = MLModelService.get_ml_model_by_id(model_id)

        # create the prediction; raises VersionExists if this version is already registered
        prediction_id = PredictionService.create(model_id, payload)
        return {"prediction_id": prediction_id}, 200
    except NotFound:
        return err(404, "model not found"), 404
    except VersionExists:
        return err(400, "Version Exists"), 400
    except DataError as e:
        current_app.logger.error(f'Error validating request: {str(e)}')
        return err(400, str(e)), 400
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
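# Illustrative request body for the prediction POST above, following the schema documented
# in its docstring (modelId, version, docker_url, bbox). All values are placeholders, and
# the bbox is assumed to use the same comma-separated wsen order as the query endpoints.
example_prediction_payload = {
    "modelId": 1,
    "version": "1.0.0",
    "docker_url": "https://hub.docker.com/r/example/model",
    "bbox": [-0.5, 51.3, 0.2, 51.7]
}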
def get(self, model_id):
    """
    Get aggregated prediction tile for a model
    within the supplied bbox and tile size
    ---
    produces:
        - application/json
    parameters:
        - in: path
          name: model_id
          description: ID of the Model
          required: true
          type: integer
        - in: query
          name: bbox
          description: bbox in the wsen format. Comma separated floats
          required: true
          type: string
        - in: query
          name: zoom
          description: zoom level for specifying aggregate tile size
          required: true
          type: integer
    responses:
        200:
            description: List of all predictions for the model within supplied bbox
        400:
            description: Invalid Request
        404:
            description: No predictions found
        500:
            description: Internal Server Error
    """
    try:
        bbox = request.args.get('bbox', '')
        zoom = request.args.get('zoom', '')
        if bbox is None or bbox == '':
            return {"error": "A bbox is required"}, 400
        if zoom is None or zoom == '':
            return {"error": "Zoom level is required for aggregation"}, 400

        # check if this model exists
        ml_model_dto = MLModelService.get_ml_model_by_id(model_id)

        tiles = PredictionTileService.get_aggregated_tiles(
            ml_model_dto.model_id, bbox, zoom)
        return tiles, 200
    except NotFound:
        return {"error": "Model not found"}, 404
    except PredictionsNotFound:
        return {"error": "No predictions for this bbox"}, 404
    except ValueError as e:
        return {"error": str(e)}, 400
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return {"error": error_msg}, 500
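# A minimal sketch of stricter zoom validation for the aggregation endpoint above.
# `parse_zoom` is a hypothetical helper, not part of the existing service code; it raises
# ValueError, which the handler already maps to a 400 response.
def parse_zoom(raw_zoom, min_zoom=0, max_zoom=22):
    """Parse and bound-check a zoom query parameter."""
    try:
        zoom = int(raw_zoom)
    except (TypeError, ValueError):
        raise ValueError("zoom must be an integer")
    if not min_zoom <= zoom <= max_zoom:
        raise ValueError(f"zoom must be between {min_zoom} and {max_zoom}")
    return zoom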
def put(self, model_id):
    """
    Update an existing model
    ---
    produces:
        - application/json
    parameters:
        - in: path
          name: model_id
          description: ID of the Model to update
          required: true
          type: integer
        - in: body
          name: body
          required: true
          type: string
          description: JSON object of model information
          schema:
            properties:
                name:
                    type: string
                    description: name of the ML model
                source:
                    type: string
                    description: source of the ML model
                dockerhub_hash:
                    type: string
                    description: dockerhub hash
                dockerhub_url:
                    type: string
                    description: dockerhub url
    responses:
        200:
            description: Updated model information
        404:
            description: Model doesn't exist
        500:
            description: Internal Server Error
    """
    try:
        updated_model_dto = MLModelDTO(request.get_json())
        current_app.logger.debug(updated_model_dto.to_primitive())
        updated_model_dto.validate()
        model_id = MLModelService.update_ml_model(updated_model_dto)
        return {"model_id": model_id}, 200
    except NotFound:
        return {"error": "model not found"}, 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return {"error": error_msg}, 500
def post(self, model_id: int):
    """
    Aggregate ml predictions for polygons in the supplied GeoJSON
    ---
    produces:
        - application/json
    parameters:
        - in: body
          name: body
          required: true
          type: string
          description: GeoJSON FeatureCollection of Polygons
    responses:
        200:
            description: GeoJSON FeatureCollection with prediction data in properties
        400:
            description: Invalid Request
        404:
            description: Model not found
        500:
            description: Internal Server Error
    """
    try:
        # validate the geojson
        data = request.get_json()
        if validate_geojson(data) is False:
            raise InvalidGeojson

        # check if the model exists
        ml_model_dto = MLModelService.get_ml_model_by_id(model_id)

        # get the bbox of the geojson
        bbox = geojson_bounds(data)

        prediction_tile_geojson = PredictionTileService.get_aggregated_tiles_geojson(
            ml_model_dto.model_id, bbox, data)
        return prediction_tile_geojson, 200
    except InvalidGeojson:
        return {"error": "Invalid GeoJSON"}, 400
    except NotFound:
        return {"error": "Model not found"}, 404
    except PredictionsNotFound:
        return {"error": "No predictions for this bbox"}, 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return {"error": error_msg}, 500
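# `validate_geojson` and `geojson_bounds` are imported from elsewhere in the project and are
# not shown in this section. A rough sketch of a bounds computation over a FeatureCollection
# of Polygons (an assumption, not the project's implementation) could look like this:
def geojson_bounds_sketch(feature_collection):
    """Return [west, south, east, north] covering all polygon coordinates."""
    xs, ys = [], []
    for feature in feature_collection.get('features', []):
        geometry = feature.get('geometry', {})
        for ring in geometry.get('coordinates', []):
            for position in ring:
                xs.append(position[0])
                ys.append(position[1])
    return [min(xs), min(ys), max(xs), max(ys)]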
def post(self):
    """
    Subscribe a new ML model
    ---
    produces:
        - application/json
    parameters:
        - in: body
          name: body
          required: true
          type: string
          description: JSON object of model information
          schema:
            properties:
                name:
                    type: string
                    description: name of the ML model
                source:
                    type: string
                    description: source of the ML model
                dockerhub_hash:
                    type: string
                    description: dockerhub hash
                dockerhub_url:
                    type: string
                    description: dockerhub url
    responses:
        200:
            description: ML Model subscribed
        400:
            description: Invalid Request
        500:
            description: Internal Server Error
    """
    try:
        model_dto = MLModelDTO(request.get_json())
        current_app.logger.info(f'request: {str(request.get_json())}')
        model_dto.validate()
        model_id = MLModelService.subscribe_ml_model(model_dto)
        return {"model_id": model_id}, 200
    except DataError as e:
        current_app.logger.error(f'Error validating request: {str(e)}')
        return str(e), 400
    except IntegrityError as e:
        current_app.logger.error(
            f'A model with the same name already exists: {str(e)}')
        return str(e), 400
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return {"error": error_msg}, 500
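# Illustrative request body for subscribing a model with the endpoint above, following the
# schema in its docstring (name, source, dockerhub_hash, dockerhub_url). All values are
# placeholders only.
example_model_payload = {
    "name": "building-detector",
    "source": "example-org",
    "dockerhub_hash": "abc123",
    "dockerhub_url": "https://hub.docker.com/r/example-org/building-detector"
}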
def get(self, model_id):
    """
    Fetch predictions for a model within supplied bbox
    ---
    produces:
        - application/json
    parameters:
        - in: path
          name: model_id
          description: ID of the Model
          required: true
          type: integer
        - in: query
          name: bbox
          description: bbox in the wsen format. Comma separated floats
          required: true
          type: string
    responses:
        200:
            description: List of all predictions for the model within supplied bbox
        400:
            description: Invalid Request
        404:
            description: No predictions found
        500:
            description: Internal Server Error
    """
    try:
        bbox = request.args.get('bbox', '')
        if bbox is None or bbox == '':
            return {"error": "A bbox is required"}, 400

        # check if this model exists
        ml_model_dto = MLModelService.get_ml_model_by_id(model_id)

        bounding_box = bbox_str_to_list(bbox)
        predictions = PredictionService.get(ml_model_dto.model_id, bounding_box)
        return predictions, 200
    except NotFound:
        return {"error": "Model not found"}, 404
    except PredictionsNotFound:
        return {"error": "Predictions not found"}, 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return {"error": error_msg}, 500
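# `bbox_str_to_list` is imported from the project's utilities and is not defined in this
# section. A minimal sketch of the assumed behaviour (comma-separated wsen string to a
# list of floats) could look like this:
def bbox_str_to_list_sketch(bbox):
    """Convert 'west,south,east,north' into [west, south, east, north] floats."""
    parts = [float(value) for value in bbox.split(',')]
    if len(parts) != 4:
        raise ValueError("bbox must contain exactly four comma separated floats")
    return parts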
def get(self):
    """
    Get all ML models
    ---
    produces:
        - application/json
    responses:
        200:
            description: List of ML models
        404:
            description: No models found
        500:
            description: Internal Server Error
    """
    try:
        ml_models = MLModelService.get_all()
        return ml_models, 200
    except NotFound:
        return {"error": "no models found"}, 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return {"error": error_msg}, 500
def post(self, model_id):
    """
    Store predictions for an ML Model
    ---
    produces:
        - application/json
    parameters:
        - in: body
          name: body
          required: true
          type: string
          description: JSON object of predictions
          schema:
            properties:
                modelId:
                    type: integer
                    description: ML Model ID
                    required: true
                version:
                    type: string
                    description: semver version of the Model
                    required: true
                dockerhub_hash:
                    type: string
                    description: dockerhub hash
                    required: false
                bbox:
                    type: array of floats
                    description: BBOX of the predictions
                    required: true
    responses:
        200:
            description: ID of the prediction
        400:
            description: Invalid Request
        404:
            description: Model not found
        500:
            description: Internal Server Error
    """
    try:
        payload = request.get_json()
        version = payload['version']

        # check if this model exists
        ml_model_dto = MLModelService.get_ml_model_by_id(model_id)

        # check if the version is registered
        model_version = MLModelVersionService.get_version_by_model_version(
            ml_model_dto.model_id, version)
        prediction_id = PredictionService.create(model_id, model_version.version_id, payload)
        return {"prediction_id": prediction_id}, 200
    except VersionNotFound:
        # if the version is not registered yet, add it
        try:
            version_array = version_to_array(version)
            version_dto = MLModelVersionDTO()
            version_dto.model_id = model_id
            version_dto.version_major = version_array[0]
            version_dto.version_minor = version_array[1]
            version_dto.version_patch = version_array[2]
            version_id = MLModelVersionService.create_version(version_dto)
            prediction_id = PredictionService.create(
                model_id, version_id, payload)
            return {"prediction_id": prediction_id}, 200
        except DataError as e:
            current_app.logger.error(f'Error validating request: {str(e)}')
            return str(e), 400
        except Exception as e:
            error_msg = f'Unhandled error: {str(e)}'
            current_app.logger.error(error_msg)
            return {"error": error_msg}, 500
    except NotFound:
        return {"error": "model not found"}, 404
    except DataError as e:
        current_app.logger.error(f'Error validating request: {str(e)}')
        return str(e), 400
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return {"error": error_msg}, 500
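# `version_to_array` is used above to split a semver string before a new model version is
# registered; it is defined elsewhere in the project. A plausible sketch (an assumption,
# not the actual implementation) could look like this:
def version_to_array_sketch(version):
    """Split 'major.minor.patch' into a list of three integers."""
    parts = version.split('.')
    if len(parts) != 3:
        raise ValueError("version must follow the major.minor.patch format")
    return [int(part) for part in parts]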