def list(project_id):
    """
    Return a list of currently running tasks for a given prediction
    ---
    produces:
        - application/json
    responses:
        200:
            description: Task List
    """
    if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
        return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

    pred_id = request.args.get("pred_id")
    # Default to every known task type when none is requested
    task_type = request.args.get("type", "retrain,tfrecords,ecr").split(",")

    if pred_id is None:
        return err(400, "pred_id param must be specified"), 400
    pred_id = int(pred_id)

    try:
        return TaskService.list(pred_id, task_type)
    except NotFound:
        return err(404, "task not found"), 404
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return err(500, f"Unhandled error: {str(e)}"), 500
def get(self, model_id):
    """
    Fetch all predictions for a model
    ---
    produces:
        - application/json
    parameters:
        - in: path
          name: model_id
          description: ID of the Model
          required: true
          type: integer
    responses:
        200:
            description: List of all predictions for the model
        404:
            description: No predictions found
        500:
            description: Internal Server Error
    """
    try:
        # Verify the model exists before looking up its predictions
        ml_model_dto = MLModelService.get_ml_model_by_id(model_id)
        return PredictionService.get_all_by_model(ml_model_dto.model_id), 200
    except PredictionsNotFound:
        return err(404, "Predictions not found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
def get(self, model_id, prediction_id):
    """
    TileJSON response for the predictions
    ---
    produces:
        - application/json
    parameters:
        - in: path
          name: model_id
          description: ID of the Model
          required: true
          type: integer
        - in: path
          name: prediction_id
          description: ID of the Prediction
          required: true
          type: integer
    responses:
        200:
            description: ID of the prediction
        400:
            description: Invalid Request
        500:
            description: Internal Server Error
    """
    try:
        # Delegate TileJSON construction entirely to the tile service
        return PredictionTileService.tilejson(model_id, prediction_id)
    except PredictionsNotFound:
        return err(404, "Prediction TileJSON not found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
def list():
    """
    Get all ML models
    ---
    produces:
        - application/json
    responses:
        200:
            description: List of ML models
        404:
            description: No models found
        500:
            description: Internal Server Error
    """
    model_filter = request.args.get("filter", "")
    archived_param = request.args.get("archived", "false")

    # Only the literal strings "true"/"false" are accepted for the flag
    if archived_param not in ("true", "false"):
        return err(400, "archived param must be 'true' or 'false'"), 400
    model_archived = archived_param == "true"

    try:
        ml_models = ProjectService.get_all(current_user.id, model_filter, model_archived)
        return jsonify(ml_models), 200
    except NotFound:
        return err(404, "no models found"), 404
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return err(500, f"Unhandled error: {str(e)}"), 500
def get(self):
    """
    Get all ML models
    ---
    produces:
        - application/json
    responses:
        200:
            description: List of ML models
        404:
            description: No models found
        500:
            description: Internal Server Error
    """
    model_filter = request.args.get('filter', '')
    archived_param = request.args.get('archived', 'false')

    # Only the literal strings 'true'/'false' are accepted for the flag
    if archived_param not in ('true', 'false'):
        return err(400, "archived param must be 'true' or 'false'"), 400
    model_archived = archived_param == 'true'

    try:
        return MLModelService.get_all(model_filter, model_archived), 200
    except NotFound:
        return err(404, "no models found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
def get(self, model_id):
    """
    Get model information with the ID
    ---
    produces:
        - application/json
    parameters:
        - in: path
          name: model_id
          description: ID of the Model to be fetched
          required: true
          type: integer
    responses:
        200:
            description: ML Model information
        404:
            description: Model doesn't exist
        500:
            description: Internal Server Error
    """
    try:
        dto = MLModelService.get_ml_model_by_id(model_id)
        # Serialize the DTO to plain types for the JSON response
        return dto.to_primitive(), 200
    except NotFound:
        return err(404, "model not found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
def delete(self, project_id, prediction_id):
    """
    Empty the SQS queue of chips to inference
    ---
    produces:
        - application/json
    responses:
        200:
            description: Status Update
    """
    if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
        return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

    try:
        sqs = boto3.client("sqs")
        prefix = "{stack}-models-{model}-prediction-{pred}-".format(
            stack=CONFIG.EnvironmentConfig.STACK,
            model=project_id,
            pred=prediction_id,
        )
        # Purge every queue (active and dead-letter) for this prediction
        for queue_url in sqs.list_queues(QueueNamePrefix=prefix)["QueueUrls"]:
            sqs.purge_queue(QueueUrl=queue_url)

        return {"status": 200, "message": "queue purged"}, 200
    except Exception as e:
        if "does not exist" in str(e):
            return {"name": CONFIG.EnvironmentConfig.STACK, "status": "None"}, 200
        current_app.logger.error(traceback.format_exc())
        return err(500, "Failed to get stack info"), 500
def delete(self, model_id, prediction_id):
    """
    Empty the SQS queue of chips to inference
    ---
    produces:
        - application/json
    responses:
        200:
            description: Status Update
        500:
            description: Internal Server Error
    """
    if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
        return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

    try:
        queues = boto3.client('sqs').list_queues(
            QueueNamePrefix="{stack}-models-{model}-prediction-{pred}-".format(
                stack=CONFIG.EnvironmentConfig.STACK,
                model=model_id,
                pred=prediction_id
            )
        )

        # Purge every queue (active and dead-letter) for this prediction
        for queue in queues['QueueUrls']:
            boto3.client('sqs').purge_queue(
                QueueUrl=queue
            )

        return {
            "status": 200,
            "message": "queue purged"
        }, 200
    except Exception as e:
        if str(e).find("does not exist") != -1:
            # Bug fix: `stack` was never defined in this scope (NameError);
            # report the configured stack name instead.
            return {
                "name": CONFIG.EnvironmentConfig.STACK,
                "status": "None"
            }, 200
        else:
            error_msg = f'Prediction Stack Info Error: {str(e)}'
            current_app.logger.error(error_msg)
            return err(500, "Failed to get stack info"), 500
def post(self, project_id):
    """
    Store predictions for an ML Model
    ---
    produces:
        - application/json
    parameters:
        - in: body
          name: body
          required: true
          type: string
          description: JSON object of predictions
          schema:
            properties:
                modelId:
                    type: integer
                    description: ML Model ID
                    required: true
                version:
                    type: string
                    description: semver version of the Model
                    required: true
                docker_url:
                    type: string
                    description: URL to docker image
                    required: false
                bbox:
                    type: array of floats
                    description: BBOX of the predictions
                    required: true
    responses:
        200:
            description: ID of the prediction
        400:
            description: Invalid Request
        500:
            description: Internal Server Error
    """
    try:
        payload = request.get_json()

        # check if this model exists
        ProjectService.get_ml_model_by_id(project_id)

        # check if the version is registered
        prediction_id = PredictionService.create(project_id, payload)

        return {"prediction_id": prediction_id}, 200
    except NotFound:
        return err(404, "model not found"), 404
    except VersionExists:
        return err(400, "Version Exists"), 400
    except DataError as e:
        current_app.logger.error(f"Error validating request: {str(e)}")
        # Bug fix: previously returned str(4) instead of the validation error
        return err(400, str(e)), 400
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        error_msg = f"Unhandled error: {str(e)}"
        return err(500, error_msg), 500
def delete(self, model_id, prediction_id):
    """
    Delete the CloudFormation prediction stack, then return its status
    ---
    produces:
        - application/json
    responses:
        200:
            description: Stack status after the delete request
        500:
            description: Internal Server Error
    """
    if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
        return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

    try:
        stack = "{stack}-models-{model}-prediction-{prediction}".format(
            stack=CONFIG.EnvironmentConfig.STACK,
            model=model_id,
            prediction=prediction_id
        )

        boto3.client('cloudformation').delete_stack(StackName=stack)

        # Report the (now deleting) stack status back to the caller
        return self.get(model_id, prediction_id)
    except Exception as e:
        if "does not exist" in str(e):
            return {"name": stack, "status": "None"}, 200
        error_msg = f'Prediction Stack Info Error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, "Failed to get stack info"), 500
def delete(self, model_id):
    """
    Deletes an existing model and it's predictions
    ---
    produces:
        - application/json
    parameters:
        - in: path
          name: model_id
          description: ID of the Model to be deleted
          required: true
          type: integer
    responses:
        200:
            description: ML Model deleted
        404:
            description: Model doesn't exist
        500:
            description: Internal Server Error
    """
    try:
        MLModelService.delete_ml_model(model_id)
    except NotFound:
        return err(404, "model not found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
    return {"success": "model deleted"}, 200
def post(self, model_id, prediction_id):
    """
    Merge per-inference boolean validity flags into a prediction tile
    ---
    produces:
        - application/json
    responses:
        200:
            description: Updated validity object for the tile
        400:
            description: Invalid Request
        404:
            description: Prediction tile not found
        500:
            description: Internal Server Error
    """
    try:
        payload = request.get_json()
        inferences = PredictionService.inferences(prediction_id)

        if payload.get("id") is None or payload.get("validity") is None:
            return err(400, "id and validity keys must be present"), 400

        tile = PredictionTileService.get(payload["id"])
        if tile is None:
            return err(404, "prediction tile not found"), 404

        current = tile.validity
        if current is None:
            current = {}

        # Accept only boolean flags for inferences known to this prediction;
        # anything else in the payload is silently ignored.
        for inf in inferences:
            p = payload["validity"].get(inf)
            if p is None or not isinstance(p, bool):
                continue
            current[inf] = p

        PredictionTileService.validity(payload["id"], current)

        return current, 200
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        # Bug fix: previously returned a bare (500, msg) tuple instead of err()
        return err(500, error_msg), 500
def get(self, project_id, prediction_id):
    """
    Return metadata about messages currently in the inference queue
    ---
    produces:
        - application/json
    responses:
        200:
            description: Status Update
    """
    if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
        return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

    try:
        sqs = boto3.client("sqs")
        prefix = "{stack}-models-{model}-prediction-{pred}-".format(
            stack=CONFIG.EnvironmentConfig.STACK,
            model=project_id,
            pred=prediction_id,
        )
        queues = sqs.list_queues(QueueNamePrefix=prefix)

        # Separate the dead-letter queue from the active queue by suffix
        active = ""
        dead = ""
        for queue_url in queues["QueueUrls"]:
            if "-dead-queue" in queue_url:
                dead = queue_url
            elif "-queue" in queue_url:
                active = queue_url

        active_attrs = sqs.get_queue_attributes(
            QueueUrl=active,
            AttributeNames=[
                "ApproximateNumberOfMessages",
                "ApproximateNumberOfMessagesNotVisible",
            ],
        )["Attributes"]
        dead_attrs = sqs.get_queue_attributes(
            QueueUrl=dead, AttributeNames=["ApproximateNumberOfMessages"]
        )["Attributes"]

        return {
            "queued": int(active_attrs["ApproximateNumberOfMessages"]),
            "inflight": int(active_attrs["ApproximateNumberOfMessagesNotVisible"]),
            "dead": int(dead_attrs["ApproximateNumberOfMessages"]),
        }, 200
    except Exception as e:
        if "does not exist" in str(e):
            return {"name": CONFIG.EnvironmentConfig.STACK, "status": "None"}, 200
        current_app.logger.error(traceback.format_exc())
        return err(500, "Failed to get stack info"), 500
def get(model_id, imagery_id):
    """Fetch a single imagery source by its ID."""
    try:
        return ImageryService.get(imagery_id), 200
    except ImageryNotFound:
        return err(404, "Imagery not found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
def get(model_id, integration_id):
    """Fetch a single integration by its ID."""
    try:
        return IntegrationService.get(integration_id), 200
    except IntegrationNotFound:
        return err(404, "Integration not found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
def get(self, project_id, prediction_id, z, x, y):
    """
    Mapbox Vector Tile Response
    ---
    produces:
        - application/x-protobuf
    parameters:
        - in: path
          name: project_id
          description: ID of the Model
          required: true
          type: integer
        - in: path
          name: prediction_id
          description: ID of the Prediction
          required: true
          type: integer
        - in: path
          name: z
          description: zoom of the tile to fetch
          required: true
          type: integer
        - in: path
          name: y
          description: y coord of the tile to fetch
          required: true
          type: integer
        - in: path
          name: x
          description: x coord of the tile to fetch
          required: true
          type: integer
    responses:
        200:
            description: ID of the prediction
        400:
            description: Invalid Request
        500:
            description: Internal Server Error
    """
    try:
        mvt = PredictionTileService.mvt(project_id, prediction_id, z, x, y)
        # Tiles are binary protobuf, so bypass the default JSON content type
        response = make_response(mvt)
        response.headers["content-type"] = "application/x-protobuf"
        return response
    except PredictionsNotFound:
        return err(404, "Prediction tile not found"), 404
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return err(500, f"Unhandled error: {str(e)}"), 500
def post(self, model_id, prediction_id):
    """
    Queue inference jobs for every tile covering a posted GeoJSON geometry
    ---
    produces:
        - application/json
    responses:
        200:
            description: Tiles queued
        500:
            description: Internal Server Error
    """
    if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
        return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

    payload = request.data

    tiler = tileschemes.WebMercator()

    try:
        prediction = PredictionService.get_prediction_by_id(prediction_id)

        poly = shape(geojson.loads(payload))

        # Reproject WGS84 input to Web Mercator before tile covering
        project = partial(
            pyproj.transform,
            pyproj.Proj(init='epsg:4326'),
            pyproj.Proj(init='epsg:3857')
        )

        poly = transform(project, poly)

        tiles = tilecover.cover_geometry(tiler, poly, prediction.tile_zoom)

        queue_name = "{stack}-models-{model}-prediction-{prediction}-queue".format(
            stack=CONFIG.EnvironmentConfig.STACK,
            model=model_id,
            prediction=prediction_id
        )

        queue = boto3.resource('sqs').get_queue_by_name(
            QueueName=queue_name
        )

        # Bug fix: the original appended only while the batch was not full,
        # dropping every 11th tile, and never flushed the final partial batch.
        cache = []
        for tile in tiles:
            cache.append({
                "Id": str(tile.z) + "-" + str(tile.x) + "-" + str(tile.y),
                "MessageBody": json.dumps({
                    "x": tile.x,
                    "y": tile.y,
                    "z": tile.z
                })
            })
            # SQS batch sends accept at most 10 messages per request
            if len(cache) == 10:
                queue.send_messages(Entries=cache)
                cache = []
        if cache:
            queue.send_messages(Entries=cache)

        return {}, 200
    except Exception as e:
        error_msg = f'Predction Tiler Error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
def get(self, model_id, prediction_id):
    """
    Return metadata about messages currently in the inference queue
    ---
    produces:
        - application/json
    responses:
        200:
            description: Queue counts (queued / inflight / dead)
        500:
            description: Internal Server Error
    """
    if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
        return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

    try:
        queues = boto3.client('sqs').list_queues(
            QueueNamePrefix="{stack}-models-{model}-prediction-{pred}-".format(
                stack=CONFIG.EnvironmentConfig.STACK,
                model=model_id,
                pred=prediction_id
            )
        )

        # Separate the dead-letter queue from the active queue by suffix
        active = ""
        dead = ""
        for queue in queues['QueueUrls']:
            if "-dead-queue" in queue:
                dead = queue
            elif "-queue" in queue:
                active = queue

        active = boto3.client('sqs').get_queue_attributes(
            QueueUrl=active,
            AttributeNames=[
                'ApproximateNumberOfMessages',
                'ApproximateNumberOfMessagesNotVisible'
            ]
        )

        dead = boto3.client('sqs').get_queue_attributes(
            QueueUrl=dead,
            AttributeNames=[
                'ApproximateNumberOfMessages'
            ]
        )

        return {
            "queued": int(active['Attributes']['ApproximateNumberOfMessages']),
            "inflight": int(active['Attributes']['ApproximateNumberOfMessagesNotVisible']),
            "dead": int(dead['Attributes']['ApproximateNumberOfMessages'])
        }, 200
    except Exception as e:
        if str(e).find("does not exist") != -1:
            # Bug fix: `stack` was never defined in this scope (NameError);
            # report the configured stack name instead.
            return {
                "name": CONFIG.EnvironmentConfig.STACK,
                "status": "None"
            }, 200
        else:
            error_msg = f'Prediction Stack Info Error: {str(e)}'
            current_app.logger.error(error_msg)
            return err(500, "Failed to get stack info"), 500
def delete(task_id):
    """Delete a single task; only available when running on AWS."""
    if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
        return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

    try:
        return TaskService.delete(task_id)
    except NotFound:
        return err(404, "task not found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
def post(self, project_id, prediction_id):
    """
    Retrain a model with validated predictions
    ---
    produces:
        - application/json
    """
    if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
        return err(501, "stack must be in 'aws' mode to use this endpoint"), 501
    if CONFIG.EnvironmentConfig.ASSET_BUCKET is None:
        return err(501, "Not Configured"), 501

    payload = request.get_json()

    pred = PredictionService.get_prediction_by_id(prediction_id)

    try:
        batch = boto3.client(
            service_name="batch",
            region_name="us-east-1",
            endpoint_url="https://batch.us-east-1.amazonaws.com",
        )

        # Submit to AWS Batch to convert to ECR image
        job = batch.submit_job(
            jobName=CONFIG.EnvironmentConfig.STACK + "-retrain",
            jobQueue=CONFIG.EnvironmentConfig.STACK + "-gpu-queue",
            jobDefinition=CONFIG.EnvironmentConfig.STACK + "-retrain-job",
            containerOverrides={
                "environment": [
                    {"name": "MODEL_ID", "value": str(project_id)},
                    {"name": "PREDICTION_ID", "value": str(prediction_id)},
                    # NOTE(review): imagery_id is passed as TILE_ENDPOINT here —
                    # confirm the batch job resolves it to an imagery URL
                    {"name": "TILE_ENDPOINT", "value": str(pred.imagery_id)},
                    {"name": "CONFIG_RETRAIN", "value": str(json.dumps(payload))},
                ]
            },
        )

        # Record the batch job so its progress can be polled later
        TaskService.create(
            {
                "pred_id": prediction_id,
                "type": "retrain",
                "batch_id": job.get("jobId"),
            }
        )

        # Bug fix: the success path previously returned None, which Flask
        # rejects as a view return value; return an explicit 200.
        return {"status": 200, "message": "retrain started"}, 200
    except Exception:
        current_app.logger.error(traceback.format_exc())
        return err(500, "Failed to start GPU Retrain"), 500
def post(self, model_id, prediction_id):
    """
    Retrain a model with validated predictions
    ---
    produces:
        - application/json
    """
    if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
        return err(501, "stack must be in 'aws' mode to use this endpoint"), 501
    if CONFIG.EnvironmentConfig.ASSET_BUCKET is None:
        return err(501, "Not Configured"), 501

    payload = request.get_json()

    if payload.get("imagery") is None:
        return err(400, "imagery key required in body"), 400

    try:
        batch = boto3.client(
            service_name='batch',
            region_name='us-east-1',
            endpoint_url='https://batch.us-east-1.amazonaws.com'
        )

        # Submit to AWS Batch to convert to ECR image
        job = batch.submit_job(
            jobName=CONFIG.EnvironmentConfig.STACK + '-retrain',
            jobQueue=CONFIG.EnvironmentConfig.STACK + '-gpu-queue',
            jobDefinition=CONFIG.EnvironmentConfig.STACK + '-gpu-job',
            containerOverrides={
                'environment': [
                    { 'name': 'MODEL_ID', 'value': str(model_id) },
                    { 'name': 'PREDICTION_ID', 'value': str(prediction_id) },
                    { 'name': 'TILE_ENDPOINT', 'value': payload.get("imagery") },
                ]
            }
        )

        # Record the batch job so its progress can be polled later
        TaskService.create({
            'pred_id': prediction_id,
            'type': 'retrain',
            'batch_id': job.get('jobId')
        })

        # Bug fix: the success path previously returned None, which Flask
        # rejects as a view return value; return an explicit 200.
        return {"status": 200, "message": "retrain started"}, 200
    except Exception as e:
        error_msg = f'Batch GPU Error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, "Failed to start GPU Retrain"), 500
def put(project_id):
    """
    Update an existing model
    ---
    produces:
        - application/json
    parameters:
        - in: path
          name: project_id
          description: ID of the Model to update
          required: true
          type: integer
        - in: body
          name: body
          required: true
          type: string
          description: JSON object of model information
          schema:
            properties:
                name:
                    type: string
                    description: name of the ML model
                source:
                    type: string
                    description: source of the ML model
                project_url:
                    type: string
                    description: URL to project page
    responses:
        200:
            description: Updated model information
        404:
            description: Model doesn't exist
        500:
            description: Internal Server Error
    """
    try:
        # Validate the request body against the project DTO schema
        dto = ProjectDTO(request.get_json())
        dto.validate()

        return {"model_id": ProjectService.update_ml_model(dto)}, 200
    except NotFound:
        return err(404, "model not found"), 404
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return err(500, f"Unhandled error: {str(e)}"), 500
def use(model_id, integration_id):
    """Forward the request body to an integration's payload handler."""
    integration_payload = request.get_json()

    try:
        IntegrationService.payload(integration_id, integration_payload)
    except IntegrationNotFound:
        return err(404, "Integration not found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500

    return {
        "status": "created"
    }, 200
def get(self, model_id, prediction_id):
    """Return a single prediction's metadata as a camelCase JSON object."""
    try:
        p = PredictionService.get_prediction_by_id(prediction_id)

        # Map the ORM attributes onto the camelCase API representation
        return {
            "predictionsId": p.id,
            "modelId": p.model_id,
            "version": p.version,
            "dockerUrl": p.docker_url,
            "tileZoom": p.tile_zoom,
            "logLink": p.log_link,
            "modelLink": p.model_link,
            "dockerLink": p.docker_link,
            "saveLink": p.save_link,
            "infSupertile": p.inf_supertile,
            "tfrecordLink": p.tfrecord_link,
            "checkpointLink": p.checkpoint_link,
            "infList": p.inf_list,
            "infBinary": p.inf_binary,
            "infType": p.inf_type
        }, 200
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
def post(self, project_id, prediction_id):
    """
    Import a file of GeoJSON inferences into the prediction

    Typically used to seed TFRecord creation preceding model creation
    ---
    produces:
        - application/json
    responses:
        200:
            description: ID of the prediction
        400:
            description: Invalid Request
        500:
            description: Internal Server Error
    """
    files = list(request.files.keys())
    if len(files) == 0:
        return err(400, "File not found in request"), 400

    inferences = request.files[files[0]]

    try:
        pred = PredictionService.get_prediction_by_id(prediction_id)

        infstream = io.BytesIO()
        inferences.save(infstream)

        # The upload is line-delimited GeoJSON: one feature per non-empty line
        inferences = infstream.getvalue().decode("UTF-8").split("\n")

        data = []
        for inf in inferences:
            if len(inf) == 0:
                continue

            data.append(geojson.loads(inf))

        PredictionTileService.create_geojson(pred, data)

        # Bug fix: the success path previously fell through and returned None,
        # which Flask rejects as a view return value; return an explicit 200.
        return {}, 200
    except InvalidGeojson as e:
        return err(400, str(e)), 400
    except PredictionsNotFound:
        return err(404, "Predictions not found"), 404
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        error_msg = f"Unhandled error: {str(e)}"
        return err(500, error_msg), 500
def get(self, model_id, prediction_id):
    """
    Return status of a prediction stack
    ---
    produces:
        - application/json
    responses:
        200:
            description: ID of the prediction
        400:
            description: Invalid Request
        500:
            description: Internal Server Error
    """
    if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
        return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

    try:
        stack = "{stack}-models-{model}-prediction-{prediction}".format(
            stack=CONFIG.EnvironmentConfig.STACK,
            model=model_id,
            prediction=prediction_id
        )

        res = boto3.client('cloudformation').describe_stacks(StackName=stack)

        # Summarize the first (only) matching stack for the response
        described = res.get("Stacks")[0]
        stack = {
            "id": described.get("StackId"),
            "name": stack,
            "status": described.get("StackStatus")
        }

        return stack, 200
    except Exception as e:
        if "does not exist" in str(e):
            return {"name": stack, "status": "None"}, 200
        error_msg = f'Prediction Stack Info Error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, "Failed to get stack info"), 500
def delete(token_id):
    """
    Delete a specific Token
    ---
    produces:
        - application/json
    responses:
        200:
            description: Token
    """
    try:
        # Scoped to the current user so tokens of others cannot be removed
        return TokenService.delete(current_user.id, token_id)
    except NotFound:
        return err(404, "No Token Found"), 404
    except Exception as e:
        current_app.logger.error(traceback.format_exc())
        return err(500, f"Unhandled error: {str(e)}"), 500
def post(model_id):
    """Create a new imagery source for a model from the JSON request body."""
    try:
        payload = request.get_json()
        new_id = ImageryService.create(model_id, payload)
        return {"model_id": model_id, "imagery_id": new_id}, 200
    except Exception as e:
        error_msg = f'Imagery Post: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, "Failed to save imagery source to DB"), 500
def patch(self, model_id, prediction_id):
    """
    Allow updating of links in model
    ---
    produces:
        - application/json
    parameters:
        - in: path
          name: model_id
          description: ID of the Model
          required: true
          type: integer
        - in: path
          name: prediction_id
          description: ID of the Prediction
          required: true
          type: integer
    responses:
        200:
            description: Prediction updated successfully
        404:
            description: Prediction not found to update
        500:
            description: Internal Server Error
    """
    try:
        updated_prediction = request.get_json()
        if updated_prediction is None:
            return err(400, "prediction must be json object"), 400

        # Apply the partial update and echo back both identifiers
        prediction_id = PredictionService.patch(prediction_id, updated_prediction)
        return {"model_id": model_id, "prediction_id": prediction_id}, 200
    except NotFound:
        return err(404, "prediction not found"), 404
    except Exception as e:
        error_msg = f'Unhandled error: {str(e)}'
        current_app.logger.error(error_msg)
        return err(500, error_msg), 500
def generate():
    # Streaming exporter closure: yields the prediction tiles in `stream`
    # as geojson (one FeatureCollection), geojsonld (line-delimited
    # features), or csv. Relies on enclosing-scope state: `first` (comma
    # bookkeeping for geojson), `stream`, `inferences`, `req_format`,
    # `req_inferences`, and `req_threshold` — TODO confirm their exact
    # types against the enclosing request handler.
    nonlocal first
    # Emit the format header before streaming rows
    if req_format == "geojson":
        yield '{ "type": "FeatureCollection", "features": ['
    elif req_format == "csv":
        output = io.StringIO()
        rowdata = ["ID", "QuadKey", "QuadKeyGeom"]
        rowdata.extend(inferences)
        csv.writer(output, quoting=csv.QUOTE_NONNUMERIC).writerow(rowdata)
        yield output.getvalue()
    # Assumed row layout: row[0]=id, row[1]=quadkey, row[2]=geometry JSON,
    # row[3]=inference scores dict, row[4]=validity — verify against caller
    for row in stream:
        # When filtering to a single inference, skip rows that lack it or
        # fall at/below the requested threshold
        if req_inferences != 'all' and row[3].get(req_inferences) is None:
            continue
        if req_inferences != 'all' and row[3].get(req_inferences) <= req_threshold:
            continue
        if req_format == "geojson" or req_format == "geojsonld":
            properties_dict = {}
            if row[4]:
                # Merge the validity object into the feature properties
                properties_dict = row[3]
                valid_dict = {}
                valid_dict.update({'validity': row[4]})
                properties_dict.update(valid_dict)
            else:
                properties_dict = row[3]
            feat = {
                "id": row[0],
                "quadkey": row[1],
                "type": "Feature",
                "properties": properties_dict,
                "geometry": json.loads(row[2])
            }
            if req_format == "geojsonld":
                yield json.dumps(feat) + '\n'
            elif req_format == "geojson":
                # First feature gets no leading comma; `first` flips once
                if first == False:
                    first = True
                    yield '\n' + json.dumps(feat)
                else:
                    yield ',\n' + json.dumps(feat)
        elif req_format == "csv":
            output = io.StringIO()
            rowdata = [ row[0], row[1], row[2]]
            for inf in inferences:
                # Missing inference scores are exported as 0.0
                rowdata.append(row[3].get(inf, 0.0))
            csv.writer(output, quoting=csv.QUOTE_NONNUMERIC).writerow(rowdata)
            yield output.getvalue()
        else:
            # NOTE(review): returning from a generator only stops iteration;
            # this err() value never reaches the HTTP client — confirm the
            # format is validated before streaming starts
            return err(501, "not a valid export type, valid export types are: geojson, csv, and npz"), 501
    # Close the FeatureCollection array opened in the header
    if req_format == "geojson":
        yield ']}'