Example #1
    def post(self, model_id, prediction_id):
        try:
            payload = request.get_json()

            inferences = PredictionService.inferences(prediction_id)

            if payload.get("id") is None or payload.get("validity") is None:
                return err(400, "id and validity keys must be present"), 400

            tile = PredictionTileService.get(payload["id"])
            if tile is None:
                return err(404, "prediction tile not found"), 404

            current = tile.validity
            if current is None:
                current = {}

            for inf in inferences:
                p = payload["validity"].get(inf)

                if not isinstance(p, bool):
                    continue

                current[inf] = p

            PredictionTileService.validity(payload["id"], current)

            return current, 200
        except Exception as e:
            error_msg = f'Unhandled error: {str(e)}'
            current_app.logger.error(error_msg)
            return err(500, error_msg), 500
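A hedged client-side sketch of exercising the validation handler above. The route is an assumption (it is not shown in the snippet) and the IDs are illustrative; the body shape follows the handler's checks: an "id" naming the prediction tile and a "validity" object mapping inference names to booleans.

    import requests

    # hypothetical route and IDs, for illustration only
    resp = requests.post(
        "http://localhost:5000/v1/model/1/prediction/7/validity",
        json={"id": 123, "validity": {"building": True, "road": False}},
    )
    print(resp.status_code, resp.json())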
Example #2
    def post(self, project_id):
        """
        Store predictions for an ML Model
        ---
        produces:
            - application/json
        parameters:
            - in: body
              name: body
              required: true
              type: string
              description: JSON object of predictions
              schema:
                properties:
                    modelId:
                        type: integer
                        description: ML Model ID
                        required: true
                    version:
                        type: string
                        description: semver version of the Model
                        required: true
                    docker_url:
                        type: string
                        description: URL to docker image
                        required: false
                    bbox:
                        type: array of floats
                        description: BBOX of the predictions
                        required: true
        responses:
            200:
                description: ID of the prediction
            400:
                description: Invalid Request
            500:
                description: Internal Server Error
        """
        try:
            payload = request.get_json()

            # check if this model exists
            ProjectService.get_ml_model_by_id(project_id)

            # check if the version is registered
            prediction_id = PredictionService.create(project_id, payload)

            return {"prediction_id": prediction_id}, 200
        except NotFound:
            return err(404, "model not found"), 404
        except VersionExists:
            return err(400, "Version Exists"), 400
        except DataError as e:
            current_app.logger.error(f"Error validating request: {str(e)}")
            return err(400, str(e)), 400
        except Exception as e:
            current_app.logger.error(traceback.format_exc())

            error_msg = f"Unhandled error: {str(e)}"
            return err(500, error_msg), 500
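A minimal sketch of posting a prediction per the swagger schema above, assuming a local server; the route is hypothetical and the field values are illustrative.

    import requests

    resp = requests.post(
        "http://localhost:5000/v1/project/1/prediction",  # hypothetical route
        json={
            "modelId": 1,
            "version": "1.0.0",
            "docker_url": "registry.example.com/model:1.0.0",
            "bbox": [-74.0, 40.7, -73.9, 40.8],
        },
    )
    print(resp.status_code, resp.json())  # {"prediction_id": ...} on success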
Example #3
    def get(self, model_id, prediction_id):
        try:
            prediction = PredictionService.get_prediction_by_id(prediction_id)

            pred = {
                "predictionsId": prediction.id,
                "modelId": prediction.model_id,
                "version": prediction.version,
                "dockerUrl": prediction.docker_url,
                "tileZoom": prediction.tile_zoom,
                "logLink": prediction.log_link,
                "modelLink": prediction.model_link,
                "dockerLink": prediction.docker_link,
                "saveLink": prediction.save_link,
                "infSupertile": prediction.inf_supertile,
                "tfrecordLink": prediction.tfrecord_link,
                "checkpointLink": prediction.checkpoint_link,
                "infList": prediction.inf_list,
                "infBinary": prediction.inf_binary,
                "infType": prediction.inf_type
            }

            return pred, 200
        except Exception as e:
            error_msg = f'Unhandled error: {str(e)}'
            current_app.logger.error(error_msg)
            return err(500, error_msg), 500
Example #4
    def get(self, model_id):
        """
        Fetch all predictions for a model
        ---
        produces:
            - application/json
        parameters:
            - in: path
              name: model_id
              description: ID of the Model
              required: true
              type: integer
        responses:
            200:
                description: List of all predictions for the model
            404:
                description: No predictions found
            500:
                description: Internal Server Error
        """
        try:
            # check if this model exists
            ml_model_dto = MLModelService.get_ml_model_by_id(model_id)

            predictions = PredictionService.get_all_by_model(ml_model_dto.model_id)
            return predictions, 200
        except PredictionsNotFound:
            return err(404, "Predictions not found"), 404
        except Exception as e:
            error_msg = f'Unhandled error: {str(e)}'
            current_app.logger.error(error_msg)
            return err(500, error_msg), 500
Example #5
    def post(self, model_id, prediction_id):
        if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
            return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

        payload = request.data

        tiler = tileschemes.WebMercator()

        try:
            prediction = PredictionService.get_prediction_by_id(prediction_id)

            poly = shape(geojson.loads(payload))

            project = partial(
                pyproj.transform,
                pyproj.Proj(init='epsg:4326'),
                pyproj.Proj(init='epsg:3857')
            )

            poly = transform(project, poly)

            tiles = tilecover.cover_geometry(tiler, poly, prediction.tile_zoom)

            queue_name = "{stack}-models-{model}-prediction-{prediction}-queue".format(
                stack=CONFIG.EnvironmentConfig.STACK,
                model=model_id,
                prediction=prediction_id
            )

            queue = boto3.resource('sqs').get_queue_by_name(
                QueueName=queue_name
            )

            cache = []
            for tile in tiles:
                cache.append({
                    "Id": str(tile.z) + "-" + str(tile.x) + "-" + str(tile.y),
                    "MessageBody": json.dumps({
                        "x": tile.x,
                        "y": tile.y,
                        "z": tile.z
                    })
                })

                # SQS allows at most 10 entries per send_messages batch
                if len(cache) == 10:
                    queue.send_messages(Entries=cache)
                    cache = []

            # flush any remaining tiles
            if len(cache) > 0:
                queue.send_messages(Entries=cache)

            return {}, 200
        except Exception as e:
            error_msg = f'Prediction Tiler Error: {str(e)}'
            current_app.logger.error(error_msg)
            return err(500, error_msg), 500
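SQS accepts at most 10 entries per send_messages call, which is why the loop above flushes in batches of 10. The same pattern as a standalone helper, sketched under the assumption that queue is any boto3 SQS Queue resource:

    def flush_in_batches(queue, entries, batch_size=10):
        """Send pre-built SQS message entries in batches of at most batch_size."""
        for i in range(0, len(entries), batch_size):
            queue.send_messages(Entries=entries[i:i + batch_size])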
Example #6
    def post(self, project_id, prediction_id):
        """
        Retrain a model with validated predictions
        ---
        produces:
            - application/json
        """

        if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
            return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

        if CONFIG.EnvironmentConfig.ASSET_BUCKET is None:
            return err(501, "Not Configured"), 501

        payload = request.get_json()

        pred = PredictionService.get_prediction_by_id(prediction_id)

        try:
            batch = boto3.client(
                service_name="batch",
                region_name="us-east-1",
                endpoint_url="https://batch.us-east-1.amazonaws.com",
            )

            # Submit to AWS Batch to convert to ECR image
            job = batch.submit_job(
                jobName=CONFIG.EnvironmentConfig.STACK + "-retrain",
                jobQueue=CONFIG.EnvironmentConfig.STACK + "-gpu-queue",
                jobDefinition=CONFIG.EnvironmentConfig.STACK + "-retrain-job",
                containerOverrides={
                    "environment": [
                        {"name": "MODEL_ID", "value": str(project_id)},
                        {"name": "PREDICTION_ID", "value": str(prediction_id)},
                        {"name": "TILE_ENDPOINT", "value": str(pred.imagery_id)},
                        {"name": "CONFIG_RETRAIN", "value": str(json.dumps(payload))},
                    ]
                },
            )

            TaskService.create(
                {
                    "pred_id": prediction_id,
                    "type": "retrain",
                    "batch_id": job.get("jobId"),
                }
            )

            # empty 200 body on success, matching the API's other endpoints
            return {}, 200
        except Exception:
            current_app.logger.error(traceback.format_exc())

            return err(500, "Failed to start GPU Retrain"), 500
Example #7
    def post(self, project_id, prediction_id):
        """
        Import a file of GeoJSON inferences into the prediction

        Typically used to seed TFRecord creation preceding model creation
        ---
        produces:
            - application/json
        responses:
            200:
                description: ID of the prediction
            400:
                description: Invalid Request
            500:
                description: Internal Server Error
        """

        files = list(request.files.keys())
        if len(files) == 0:
            return err(400, "File not found in request"), 400

        inferences = request.files[files[0]]

        try:
            pred = PredictionService.get_prediction_by_id(prediction_id)

            infstream = io.BytesIO()
            inferences.save(infstream)
            inferences = infstream.getvalue().decode("UTF-8").split("\n")

            data = []
            for inf in inferences:
                if len(inf) == 0:
                    continue

                data.append(geojson.loads(inf))

            PredictionTileService.create_geojson(pred, data)

            return {"prediction_id": prediction_id}, 200
        except InvalidGeojson as e:
            return err(400, str(e)), 400
        except PredictionsNotFound:
            return err(404, "Predictions not found"), 404
        except Exception as e:
            current_app.logger.error(traceback.format_exc())

            error_msg = f"Unhandled error: {str(e)}"
            return err(500, error_msg), 500
Example #8
    def patch(self, model_id, prediction_id):
        """
        Allow updating of links in model
        ---
        produces:
            - application/json
        parameters:
            - in: path
              name: model_id
              description: ID of the Model
              required: true
              type: integer
            - in: path
              name: prediction_id
              description: ID of the Prediction
              required: true
              type: integer
        responses:
            200:
                description: Prediction updated successfully
            404:
                description: Prediction not found to update
            500:
                description: Internal Server Error
        """
        try:
            updated_prediction = request.get_json()

            if updated_prediction is None:
                return err(400, "prediction must be json object"), 400

            prediction_id = PredictionService.patch(prediction_id, updated_prediction)

            return {
                "model_id": model_id,
                "prediction_id": prediction_id
            }, 200
        except NotFound:
            return err(404, "prediction not found"), 404
        except Exception as e:
            error_msg = f'Unhandled error: {str(e)}'
            current_app.logger.error(error_msg)
            return err(500, error_msg), 500
Example #9
File: ml.py Project: tossetolab/ml-enabler
    def get(self, model_id):
        """
        Fetch predictions for a model within supplied bbox
        ---
        produces:
            - application/json
        parameters:
            - in: path
              name: model_id
              description: ID of the Model
              required: true
              type: integer
            - in: query
              name: bbox
              description: bbox in the wsen format. Comma separated floats
              required: true
              type: string
        responses:
            200:
                description: List of all predictions for the model within supplied bbox
            404:
                description: No predictions found
            500:
                description: Internal Server Error
        """
        try:
            bbox = request.args.get('bbox', '')
            if not bbox:
                return {"error": 'A bbox is required'}, 400

            # check if this model exists
            ml_model_dto = MLModelService.get_ml_model_by_id(model_id)

            boundingBox = bbox_str_to_list(bbox)
            predictions = PredictionService.get(ml_model_dto.model_id,
                                                boundingBox)
            return predictions, 200
        except PredictionsNotFound:
            return {"error": "Predictions not found"}, 404
        except Exception as e:
            error_msg = f'Unhandled error: {str(e)}'
            current_app.logger.error(error_msg)
            return {"error": error_msg}, 500
Example #10
    def post(self, model_id, prediction_id):
        if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
            return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

        payload = request.get_json()

        if payload.get("imagery") is None:
            return err(400, "imagery key required in body"), 400

        pred = PredictionService.get_prediction_by_id(prediction_id)
        image = "models-{model}-prediction-{prediction}".format(
            model=model_id,
            prediction=prediction_id
        )

        stack = "{stack}-{image}".format(
            stack=CONFIG.EnvironmentConfig.STACK,
            image=image
        )

        template = ''
        with open('cloudformation/prediction.template.json', 'r') as file:
            template = file.read()

        try:
            boto3.client('cloudformation').create_stack(
                StackName=stack,
                TemplateBody=template,
                Tags=payload.get("tags", []),
                Parameters=[{
                    'ParameterKey': 'GitSha',
                    'ParameterValue': CONFIG.EnvironmentConfig.GitSha,
                },{
                    'ParameterKey': 'MachineAuth',
                    'ParameterValue': CONFIG.EnvironmentConfig.MACHINE_AUTH
                },{
                    'ParameterKey': 'StackName',
                    'ParameterValue': CONFIG.EnvironmentConfig.STACK,
                },{
                    'ParameterKey': 'ImageTag',
                    'ParameterValue': image,
                },{
                    'ParameterKey': 'Inferences',
                    'ParameterValue': pred.inf_list,
                },{
                    'ParameterKey': 'ModelId',
                    'ParameterValue': str(model_id)
                },{
                    'ParameterKey': 'PredictionId',
                    'ParameterValue': str(prediction_id)
                },{
                    'ParameterKey': 'ImageryId',
                    'ParameterValue': str(payload["imagery"]),
                },{
                    'ParameterKey': 'MaxSize',
                    'ParameterValue': payload.get("maxSize", "1"),
                },{
                    'ParameterKey': 'MaxConcurrency',
                    'ParameterValue': payload.get("maxConcurrency", "50"),
                },{
                    'ParameterKey': 'InfSupertile',
                    'ParameterValue': str(pred.inf_supertile),
                }],
                Capabilities=[
                    'CAPABILITY_NAMED_IAM'
                ],
                OnFailure='ROLLBACK',
            )

            return self.get(model_id, prediction_id)
        except Exception as e:
            error_msg = f'Prediction Stack Creation Error: {str(e)}'
            current_app.logger.error(error_msg)
            return err(500, "Failed to create stack info"), 500
Example #11
    def get(self, model_id, prediction_id):
        req_format = request.args.get('format', 'geojson')
        req_inferences = request.args.get('inferences', 'all')
        req_threshold = request.args.get('threshold', '0')
        req_threshold = float(req_threshold)

        stream = PredictionService.export(prediction_id)
        inferences = PredictionService.inferences(prediction_id)
        pred = PredictionService.get_prediction_by_id(prediction_id)

        first = False

        if req_inferences != 'all':
            inferences = [ req_inferences ]

        def generate_npz():
            nonlocal req_threshold
            labels_dict = {}
            for row in stream:
                if req_inferences != 'all' and row[3].get(req_inferences) is None:
                    continue

                if req_inferences != 'all' and row[3].get(req_inferences) <= req_threshold:
                    continue
                if row[4]:
                    i_lst = pred.inf_list.split(",")

                    # convert raw predictions into 0 or 1 based on threshold
                    raw_pred = []
                    for inference in i_lst:
                        raw_pred.append(row[3][inference])
                    if req_inferences == 'all':
                        req_threshold = float(request.args.get('threshold', '0.5'))
                    binary_pred = [1 if score >= req_threshold else 0 for score in raw_pred]

                    # convert quadkey to x-y-z
                    t = '-'.join([str(i) for i in mercantile.quadkey_to_tile(row[1])])

                    # special case for binary
                    if pred.inf_binary and len(i_lst) != 2:
                        return err(400, "binary models must have two categories"), 400
                    if len(i_lst) == 2 and pred.inf_binary:
                        if list(row[4].values())[0]:  # validated and true, keep original
                            labels_dict.update({t: binary_pred})
                        else:
                            # validated false: flip the one-hot label
                            if binary_pred == [1, 0]:
                                binary_pred = [0, 1]
                            else:
                                binary_pred = [1, 0]
                            labels_dict.update({t: binary_pred})
                    else:
                        # for multi-label: flip each inference marked invalid
                        for key in list(row[4].keys()):
                            i = i_lst.index(key)
                            if not row[4][key]:
                                binary_pred[i] = 1 - binary_pred[i]
                            labels_dict.update({t: binary_pred})
            if not labels_dict:
                raise NoValid

            bytestream = io.BytesIO()
            np.savez(bytestream, **labels_dict)
            return bytestream.getvalue()

        def generate():
            nonlocal first
            if req_format == "geojson":
                yield '{ "type": "FeatureCollection", "features": ['
            elif req_format == "csv":
                output = io.StringIO()
                rowdata = ["ID", "QuadKey", "QuadKeyGeom"]
                rowdata.extend(inferences)
                csv.writer(output, quoting=csv.QUOTE_NONNUMERIC).writerow(rowdata)
                yield output.getvalue()

            for row in stream:
                if req_inferences != 'all' and row[3].get(req_inferences) is None:
                    continue

                if req_inferences != 'all' and row[3].get(req_inferences) <= req_threshold:
                    continue

                if req_format == "geojson" or req_format == "geojsonld":
                    properties_dict = {}
                    if row[4]:
                        properties_dict = row[3]
                        valid_dict = {}
                        valid_dict.update({'validity': row[4]})
                        properties_dict.update(valid_dict)
                    else:
                        properties_dict = row[3]
                    feat = {
                        "id": row[0],
                        "quadkey": row[1],
                        "type": "Feature",
                        "properties": properties_dict,
                        "geometry": json.loads(row[2])
                    }
                    if req_format == "geojsonld":
                        yield json.dumps(feat) + '\n'
                    elif req_format == "geojson":
                        if first is False:
                            first = True
                            yield '\n' + json.dumps(feat)
                        else:
                            yield ',\n' + json.dumps(feat)
                elif req_format == "csv":
                    output = io.StringIO()
                    rowdata = [ row[0], row[1], row[2]]
                    for inf in inferences:
                        rowdata.append(row[3].get(inf, 0.0))
                    csv.writer(output, quoting=csv.QUOTE_NONNUMERIC).writerow(rowdata)
                    yield output.getvalue()
                else:
                    return err(501, "not a valid export type, valid export types are: geojson, csv, and npz"), 501

            if req_format == "geojson":
                yield ']}'

        if req_format == "csv":
            mime = "text/csv"
        elif req_format == "geojson":
            mime = "application/geo+json"
        elif req_format == "geojsonld":
            mime = "application/geo+json-seq"
        elif req_format == "npz":
            mime = "application/npz"
        if req_format == "npz":
            try:
                npz = generate_npz()
                return Response(
                    response=npz,
                    mimetype=mime,
                    status=200,
                    headers={
                        "Content-Disposition": 'attachment; filename="export.' + req_format + '"'
                    }
                )
            except NoValid:
                return err(400, "Can only return npz if predictions are validated. Currently there are no valid predictions"), 400
        else:
            return Response(
                generate(),
                mimetype = mime,
                status = 200,
                headers = {
                    "Content-Disposition": 'attachment; filename="export.' + req_format + '"'
                }
            )
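A hedged sketch of consuming the NPZ export produced by generate_npz() above; the filename is illustrative. Each key is an "x-y-z" tile name mapped to a 0/1 label array.

    import numpy as np

    with np.load("export.npz") as labels:
        for tile_name in labels.files:
            print(tile_name, labels[tile_name])  # e.g. 1205-1539-12 [1 0]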
Example #12
    def get(self, project_id, prediction_id):
        """
        Export Geospatial Predictions
        ---
        parameters:
            - name: project_id
              in: path
              schema:
                type: integer
                minimum: 0
              description: The ID of the Project

            - name: prediction_id
              in: path
              schema:
                type: integer
                minimum: 0
              description: The ID of the Prediction

            - name: format
              in: query
              schema:
                type: string
                default: geojson
                enum:
                  - geojson
                  - geojsonld
                  - csv
                  - npz
              description: The format to provide records in

            - name: inferences
              in: query
              schema:
                type: string
                default: all
              description: Return all inferences or only a single inference

            - name: threshold
              in: query
              schema:
                type: number
                default: 0
                minimum: 0
                maximum: 1
              description: The confidence threshold to apply to exported inferences
        responses:
            200:
                description: Exported Data
        """
        req_format = request.args.get("format", "geojson")
        req_inferences = request.args.get("inferences", "all")
        req_threshold = request.args.get("threshold", "0")
        req_threshold = float(req_threshold)

        stream = PredictionService.export(prediction_id)
        inferences = PredictionService.inferences(prediction_id)
        pred = PredictionService.get_prediction_by_id(prediction_id)
        hint = pred.hint
        z = pred.tile_zoom
        i_info = ImageryService.get(pred.imagery_id)
        c_list = i_info  # chip-list info comes from the same imagery record

        first = False

        if req_inferences != "all":
            inferences = [req_inferences]

        def generate_npz():
            nonlocal req_threshold
            nonlocal hint
            nonlocal z
            nonlocal i_info
            nonlocal c_list

            # get the chip-list CSV as a dataframe to match chip-list names and geometry with the geometry in the predictions database

            labels_dict = {}
            for row in stream:
                if req_inferences != "all" and row[3].get(req_inferences) is None:
                    continue

                if (
                    req_inferences != "all"
                    and row[3].get(req_inferences) <= req_threshold
                ):
                    continue

                # set labels.npz key to be x-y-z tile either from quadkey or wkt geometry
                if i_info["fmt"] == "wms":
                    if row[1]:
                        t = "-".join(
                            [str(i) for i in mercantile.quadkey_to_tile(row[1])]
                        )
                    else:
                        s = shape(json.loads(row[2])).centroid
                        t = "-".join([str(i) for i in mercantile.tile(s.x, s.y, z)])
                if i_info["fmt"] == "list":
                    r = requests.get(c_list["url"])
                    df = pd.read_csv(io.StringIO(r.text))
                    df["c"] = df["bounds"].apply(
                        lambda x: box(*[float(n) for n in x.split(",")])
                    )
                    gdf = gpd.GeoDataFrame(df, crs="EPSG:4326", geometry=df["c"])
                    # get tile name that where chip-list geom and geom in prediction row match
                    gdf_2 = gpd.GeoDataFrame(
                        {"geometry": [shape(json.loads(row[2]))]}, crs="EPSG:4326"
                    )
                    # TODO: account for the no-overlap case
                    i = gpd.overlay(gdf, gdf_2, how="intersection")
                    tiles_intersection = i["name"].tolist()

                # convert raw predictions into 0 or 1 based on threshold
                raw_pred = []
                i_lst = pred.inf_list.split(",")
                for inference in i_lst:
                    raw_pred.append(row[3][inference])
                if req_inferences == "all":
                    req_threshold = request.args.get("threshold", "0.5")
                    req_threshold = float(req_threshold)
                binary_pred_list = [1 if score >= req_threshold else 0 for score in raw_pred]

                # special case for training and not predictions
                if hint == "training":
                    if i_info["fmt"] == "list":
                        for chip_name in tiles_intersection:
                            labels_dict.update({chip_name: binary_pred_list})
                    else:
                        labels_dict.update({t: binary_pred_list})
                elif row[4]:
                    t = "-".join([str(i) for i in mercantile.quadkey_to_tile(row[1])])

                    # special case for binary
                    if pred.inf_binary and len(i_lst) != 2:
                        return err(400, "binary models must have two catagories"), 400
                    if len(i_lst) == 2 and pred.inf_binary:
                        if list(row[4].values())[
                            0
                        ]:  # validated and true, keep original
                            labels_dict.update({t: binary_pred_list})
                        else:
                            if binary_pred_list == [1, 0]:
                                binary_pred_list = [0, 1]
                            else:
                                binary_pred_list = [1, 0]
                            labels_dict.update({t: binary_pred_list})
                    else:
                        # for multi-label
                        for key in list(row[4].keys()):
                            i = i_lst.index(key)
                            if not row[4][key]:
                                if binary_pred_list[i] == 0:
                                    binary_pred_list[i] = 1
                                else:
                                    binary_pred_list[i] = 0
                            labels_dict.update({t: binary_pred_list})
            if not labels_dict:
                raise NoValid

            bytestream = io.BytesIO()
            np.savez(bytestream, **labels_dict)
            return bytestream.getvalue()

        def generate():
            nonlocal first
            if req_format == "geojson":
                yield '{ "type": "FeatureCollection", "features": ['
            elif req_format == "csv":
                output = io.StringIO()
                rowdata = ["ID", "QuadKey", "QuadKeyGeom"]
                rowdata.extend(inferences)
                csv.writer(output, quoting=csv.QUOTE_NONNUMERIC).writerow(rowdata)
                yield output.getvalue()
            for row in stream:
                if req_inferences != "all" and row[3].get(req_inferences) is None:
                    continue

                if (
                    req_inferences != "all"
                    and row[3].get(req_inferences) <= req_threshold
                ):
                    continue

                if req_format == "geojson" or req_format == "geojsonld":
                    properties_dict = {}
                    if row[4]:
                        properties_dict = row[3]
                        valid_dict = {}
                        valid_dict.update({"validity": row[4]})
                        properties_dict.update(valid_dict)
                    else:
                        properties_dict = row[3]
                    feat = {
                        "id": row[0],
                        "quadkey": row[1],
                        "type": "Feature",
                        "properties": properties_dict,
                        "geometry": json.loads(row[2]),
                    }
                    if req_format == "geojsonld":
                        yield json.dumps(feat) + "\n"
                    elif req_format == "geojson":
                        if first is False:
                            first = True
                            yield "\n" + json.dumps(feat)
                        else:
                            yield ",\n" + json.dumps(feat)
                elif req_format == "csv":
                    output = io.StringIO()
                    rowdata = [row[0], row[1], row[2]]
                    for inf in inferences:
                        rowdata.append(row[3].get(inf, 0.0))
                    csv.writer(output, quoting=csv.QUOTE_NONNUMERIC).writerow(rowdata)
                    yield output.getvalue()
                else:
                    return (
                        err(
                            501,
                            "not a valid export type, valid export types are: geojson, csv, and npz",
                        ),
                        501,
                    )

            if req_format == "geojson":
                yield "]}"

        if req_format == "csv":
            mime = "text/csv"
        elif req_format == "geojson":
            mime = "application/geo+json"
        elif req_format == "geojsonld":
            mime = "application/geo+json-seq"
        elif req_format == "npz":
            mime = "application/npz"
        if req_format == "npz":
            try:
                return Response(
                    response=generate_npz(),
                    mimetype=mime,
                    status=200,
                    headers={
                        "Content-Disposition": 'attachment; filename="export.'
                        + req_format
                        + '"'
                    },
                )
            except NoValid:
                return (
                    err(
                        400,
                        "Can only return npz if predictions are validated. Currently there are no valid predictions",
                    ),
                    400,
                )
        else:
            return Response(
                generate(),
                mimetype=mime,
                status=200,
                headers={
                    "Content-Disposition": 'attachment; filename="export.'
                    + req_format
                    + '"'
                },
            )
Example #13
File: ml.py Project: tossetolab/ml-enabler
    def post(self, model_id):
        """
        Store predictions for an ML Model
        ---
        produces:
            - application/json
        parameters:
            - in: body
              name: body
              required: true
              type: string
              description: JSON object of predictions
              schema:
                properties:
                    modelId:
                        type: integer
                        description: ML Model ID
                        required: true
                    version:
                        type: string
                        description: semver version of the Model
                        required: true
                    dockerhub_hash:
                        type: string
                        description: dockerhub hash
                        required: false
                    bbox:
                        type: array of floats
                        description: BBOX of the predictions
                        required: true
        responses:
            200:
                description: ID of the prediction
            400:
                description: Invalid Request
            500:
                description: Internal Server Error
        """
        try:
            payload = request.get_json()
            version = payload['version']

            # check if this model exists
            ml_model_dto = MLModelService.get_ml_model_by_id(model_id)

            # check if the version is registered
            model_version = MLModelVersionService.get_version_by_model_version(
                ml_model_dto.model_id, version)
            prediction_id = PredictionService.create(model_id,
                                                     model_version.version_id,
                                                     payload)
            return {"prediction_id": prediction_id}, 200

        except VersionNotFound:
            # if not, add it
            try:
                version_array = version_to_array(version)
                version_dto = MLModelVersionDTO()
                version_dto.model_id = model_id
                version_dto.version_major = version_array[0]
                version_dto.version_minor = version_array[1]
                version_dto.version_patch = version_array[2]
                version_id = MLModelVersionService.create_version(version_dto)

                prediction_id = PredictionService.create(
                    model_id, version_id, payload)
                return {"prediction_id": prediction_id}, 200
            except DataError as e:
                current_app.logger.error(f'Error validating request: {str(e)}')
                return str(e), 400
            except Exception as e:
                error_msg = f'Unhandled error: {str(e)}'
                current_app.logger.error(error_msg)
                return {"error": error_msg}, 500
        except NotFound:
            return {"error": "model not found"}, 404
        except DataError as e:
            current_app.logger.error(f'Error validating request: {str(e)}')
            return str(e), 400
        except Exception as e:
            error_msg = f'Unhandled error: {str(e)}'
            current_app.logger.error(error_msg)
            return {"error": error_msg}, 500
Example #14
    def payload(integration_id: int, payload: dict):
        integration = IntegrationService.get_secrets(integration_id)

        if integration is None:
            raise IntegrationNotFound('Integration Not Found')

        if integration.integration != "maproulette":
            raise Exception("Only MapRoulette Integrations supported");

        for ele in ['prediction', 'project', 'project_desc', 'challenge', 'challenge_instr', 'threshold', 'inferences']:
            if payload.get(ele) is None:
                raise Exception('Missing ' + ele + ' key in body')

        auth = integration.auth
        if payload.get('auth') is not None:
            auth = payload.get('auth')

        parsed = urlparse(integration.url)

        config = maproulette.Configuration(
            api_key=auth,
            hostname=parsed.netloc,
            protocol=parsed.scheme
        )

        project_api = maproulette.Project(config)
        challenge_api = maproulette.Challenge(config)

        try:
            project = project_api.get_project_by_name(
                project_name=payload.get('project')
            )
        except Exception:
            project = project_api.create_project(
                data={
                    "name": payload.get('project'),
                    "display_name": payload.get('project'),
                    "description": payload.get('project_desc'),
                    "enabled": True
                }
            )

        challenge = challenge_api.create_challenge(
            data={
                'name': payload.get('challenge'),
                'parent': project['data']['id'],
                'instruction': payload.get('challenge_instr')
            }
        )

        req_inferences = payload.get('inferences', 'all')
        req_threshold = float(payload.get('threshold', '0'))

        stream = PredictionService.export(int(payload.get('prediction')))
        inferences = PredictionService.inferences(int(payload.get('prediction')))
        pred = PredictionService.get_prediction_by_id(int(payload.get('prediction')))

        if req_inferences != 'all':
            inferences = [ req_inferences ]

        fc = {
            'type': 'FeatureCollection',
            'features': []
        }

        for row in stream:
            if req_inferences != 'all' and row[3].get(req_inferences) is None:
                continue
            if req_inferences != 'all' and row[3].get(req_inferences) <= req_threshold:
                continue

            properties_dict = {}
            if row[4]:
                properties_dict = row[3]
                valid_dict = {}
                valid_dict.update({'validity': row[4]})
                properties_dict.update(valid_dict)

            feat = {
                "id": row[0],
                "quadkey": row[1],
                "type": "Feature",
                "properties": properties_dict,
                "geometry": json.loads(row[2])
            }

            fc['features'].append(feat)

        challenge_api.add_tasks_to_challenge(
            challenge_id=challenge['data']['id'],
            data=fc
        )

        return {
            "project": project['data']['id'],
            "challenge": challenge['data']['id']
        }
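A hypothetical invocation of the payload() hook above, assuming it is callable as shown; the integration_id and every field value are illustrative. The required keys mirror the validation loop at the top of the function.

    result = payload(
        integration_id=1,
        payload={
            "prediction": "42",
            "project": "building-validation",
            "project_desc": "Validate ML building predictions",
            "challenge": "round-1",
            "challenge_instr": "Confirm or reject each prediction",
            "threshold": "0.8",
            "inferences": "building",
        },
    )
    # result -> {"project": <project id>, "challenge": <challenge id>}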
Example #15
    def payload(integration_id: int, payload: dict):
        integration = IntegrationService.get_secrets(integration_id)

        if integration is None:
            raise IntegrationNotFound("Integration Not Found")

        if integration.integration != "maproulette":
            raise Exception("Only MapRoulette Integrations supported")

        for ele in [
                "prediction",
                "project",
                "project_desc",
                "challenge",
                "challenge_instr",
                "threshold",
                "inferences",
        ]:
            if payload.get(ele) is None:
                raise Exception("Missing " + ele + " key in body")

        auth = integration.auth
        if payload.get("auth") is not None:
            auth = payload.get("auth")

        parsed = urlparse(integration.url)

        config = maproulette.Configuration(api_key=auth,
                                           hostname=parsed.netloc,
                                           protocol=parsed.scheme)

        project_api = maproulette.Project(config)
        challenge_api = maproulette.Challenge(config)

        try:
            project = project_api.get_project_by_name(
                project_name=payload.get("project"))
        except Exception:
            project = project_api.create_project(
                data={
                    "name": payload.get("project"),
                    "display_name": payload.get("project"),
                    "description": payload.get("project_desc"),
                    "enabled": True,
                })

        challenge = challenge_api.create_challenge(
            data={
                "name": payload.get("challenge"),
                "parent": project["data"]["id"],
                "instruction": payload.get("challenge_instr"),
            })

        req_inferences = payload.get("inferences", "all")
        req_threshold = float(payload.get("threshold", "0"))

        stream = PredictionService.export(int(payload.get("prediction")))

        feats = {"type": "FeatureCollection", "features": []}

        for row in stream:
            if req_inferences != "all" and row[3].get(req_inferences) is None:
                continue
            if req_inferences != "all" and row[3].get(
                    req_inferences) <= req_threshold:
                continue

            properties_dict = row[3]

            if row[4]:
                valid_dict = {}
                valid_dict.update({"validity": row[4]})
                properties_dict.update(valid_dict)

            properties_dict["mle:id"] = row[0]

            feat = {
                "quadkey": row[1],
                "type": "Feature",
                "properties": properties_dict,
                "geometry": json.loads(row[2]),
            }

            feats["features"].append(feat)

        challenge_api.add_tasks_to_challenge(
            challenge_id=challenge["data"]["id"], data=feats)

        return {
            "project": project["data"]["id"],
            "challenge": challenge["data"]["id"]
        }
Example #16
File: stacks.py Project: rustyb/ml-enabler
def post(project_id, prediction_id):
    if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
        return err(501,
                   "stack must be in 'aws' mode to use this endpoint"), 501

    payload = request.get_json()

    pred = PredictionService.get_prediction_by_id(prediction_id)
    image = "models-{model}-prediction-{prediction}".format(
        model=project_id, prediction=prediction_id)

    stack = "{stack}-{image}".format(stack=CONFIG.EnvironmentConfig.STACK,
                                     image=image)

    template = ""
    with open("cloudformation/prediction.template.json", "r") as file:
        template = file.read()

    try:
        boto3.client("cloudformation").create_stack(
            StackName=stack,
            TemplateBody=template,
            Tags=payload.get("tags", []),
            Parameters=[
                {
                    "ParameterKey": "GitSha",
                    "ParameterValue": CONFIG.EnvironmentConfig.GitSha,
                },
                {
                    "ParameterKey": "MachineAuth",
                    "ParameterValue": CONFIG.EnvironmentConfig.MACHINE_AUTH,
                },
                {
                    "ParameterKey": "StackName",
                    "ParameterValue": CONFIG.EnvironmentConfig.STACK,
                },
                {
                    "ParameterKey": "ImageTag",
                    "ParameterValue": image,
                },
                {
                    "ParameterKey": "Inferences",
                    "ParameterValue": pred.inf_list,
                },
                {
                    "ParameterKey": "ModelId",
                    "ParameterValue": str(project_id)
                },
                {
                    "ParameterKey": "PredictionId",
                    "ParameterValue": str(prediction_id)
                },
                {
                    "ParameterKey": "ImageryId",
                    "ParameterValue": str(pred.imagery_id),
                },
                {
                    "ParameterKey": "MaxSize",
                    "ParameterValue": payload.get("maxSize", "1"),
                },
                {
                    "ParameterKey": "MaxConcurrency",
                    "ParameterValue": payload.get("maxConcurrency", "50"),
                },
                {
                    "ParameterKey": "InfSupertile",
                    "ParameterValue": str(pred.inf_supertile),
                },
            ],
            Capabilities=["CAPABILITY_NAMED_IAM"],
            OnFailure="ROLLBACK",
        )

        return {"status": "Stack Creation Initiated"}
    except Exception:
        current_app.logger.error(traceback.format_exc())

        return err(500, "Failed to create stack info"), 500
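The endpoint above returns as soon as stack creation is initiated. A minimal sketch of polling for completion with the standard CloudFormation API, where stack_name is the "{stack}-{image}" value computed above:

    import boto3

    def stack_status(stack_name):
        """Return the stack's current status, e.g. CREATE_IN_PROGRESS."""
        resp = boto3.client("cloudformation").describe_stacks(StackName=stack_name)
        return resp["Stacks"][0]["StackStatus"]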
Example #17
    def post(self, project_id, prediction_id):
        """
        Attach a raw model to a given prediction
        ---
        produces:
            - application/json
        responses:
            200:
                description: ID of the prediction
            400:
                description: Invalid Request
            500:
                description: Internal Server Error
        """

        if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
            return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

        if CONFIG.EnvironmentConfig.ASSET_BUCKET is None:
            return err(501, "Not Configured"), 501

        modeltype = request.args.get("type", "model")
        if modeltype not in ["model", "tfrecord", "checkpoint"]:
            return err(400, "Unsupported type param"), 400

        key = "models/{0}/prediction/{1}/{2}.zip".format(
            project_id, prediction_id, modeltype
        )

        try:
            boto3.client("s3").head_object(
                Bucket=CONFIG.EnvironmentConfig.ASSET_BUCKET, Key=key
            )
        except Exception:
            files = list(request.files.keys())
            if len(files) == 0:
                return err(400, "Model not found in request"), 400

            model = request.files[files[0]]

            # Save the model to S3
            try:
                boto3.resource("s3").Bucket(
                    CONFIG.EnvironmentConfig.ASSET_BUCKET
                ).put_object(Key=key, Body=model.stream)
            except Exception:
                current_app.logger.error(traceback.format_exc())

                return err(500, "Failed to upload model to S3"), 500

            if modeltype == "checkpoint":
                try:
                    PredictionService.patch(
                        prediction_id,
                        {
                            "checkpointLink": CONFIG.EnvironmentConfig.ASSET_BUCKET
                            + "/"
                            + key
                        },
                    )
                except Exception:
                    current_app.logger.error(traceback.format_exc())

                    return err(500, "Failed to save checkpoint state to DB"), 500

            if modeltype == "tfrecord":
                try:
                    PredictionService.patch(
                        prediction_id,
                        {
                            "tfrecordLink": CONFIG.EnvironmentConfig.ASSET_BUCKET
                            + "/"
                            + key
                        },
                    )
                except Exception:
                    current_app.logger.error(traceback.format_exc())

                    return err(500, "Failed to save checkpoint state to DB"), 500

            if modeltype == "model":
                # Save the model link to ensure UI shows upload success
                try:
                    PredictionService.patch(
                        prediction_id,
                        {
                            "modelLink": CONFIG.EnvironmentConfig.ASSET_BUCKET
                            + "/"
                            + key
                        },
                    )
                except Exception:
                    current_app.logger.error(traceback.format_exc())

                    return err(500, "Failed to save model state to DB"), 500

                try:
                    batch = boto3.client(
                        service_name="batch",
                        region_name="us-east-1",
                        endpoint_url="https://batch.us-east-1.amazonaws.com",
                    )

                    # Submit to AWS Batch to convert to ECR image
                    job = batch.submit_job(
                        jobName=CONFIG.EnvironmentConfig.STACK + "ecr-build",
                        jobQueue=CONFIG.EnvironmentConfig.STACK + "-queue",
                        jobDefinition=CONFIG.EnvironmentConfig.STACK + "-build-job",
                        containerOverrides={
                            "environment": [
                                {
                                    "name": "MODEL",
                                    "value": CONFIG.EnvironmentConfig.ASSET_BUCKET
                                    + "/"
                                    + key,
                                }
                            ]
                        },
                    )

                    TaskService.create(
                        {
                            "pred_id": prediction_id,
                            "type": "ecr",
                            "batch_id": job.get("jobId"),
                        }
                    )
                except Exception:
                    current_app.logger.error(traceback.format_exc())

                    return err(500, "Failed to start ECR build"), 500

            return {"status": "model uploaded"}, 200
        else:
            return err(400, "asset exists"), 400
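A hedged sketch of uploading a model archive to the endpoint above; the route is an assumption. The multipart field name is arbitrary since the handler reads the first file in the request, and the type query parameter matches the handler's "model" / "tfrecord" / "checkpoint" options.

    import requests

    with open("model.zip", "rb") as fh:  # illustrative local file
        resp = requests.post(
            "http://localhost:5000/v1/project/1/prediction/7/upload?type=model",
            files={"file": fh},
        )
    print(resp.status_code, resp.json())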
Example #18
    def post(self, model_id, prediction_id):
        """
        Attach a raw model to a given prediction
        ---
        produces:
            - application/json
        responses:
            200:
                description: ID of the prediction
            400:
                description: Invalid Request
            500:
                description: Internal Server Error
        """

        if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
            return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

        if CONFIG.EnvironmentConfig.ASSET_BUCKET is None:
            return err(501, "Not Configured"), 501

        modeltype = request.args.get('type', 'model')
        if modeltype not in ["model", "tfrecord", "checkpoint"]:
            return err(400, "Unsupported type param"), 400

        key = "models/{0}/prediction/{1}/{2}.zip".format(
            model_id,
            prediction_id,
            modeltype
        )

        try:
            boto3.client('s3').head_object(
                Bucket=CONFIG.EnvironmentConfig.ASSET_BUCKET,
                Key=key
            )
        except Exception:
            files = list(request.files.keys())
            if len(files) == 0:
                return err(400, "Model not found in request"), 400

            model = request.files[files[0]]

            # Save the model to S3
            try:
                boto3.resource('s3').Bucket(CONFIG.EnvironmentConfig.ASSET_BUCKET).put_object(
                    Key=key,
                    Body=model.stream
                )
            except Exception as e:
                error_msg = f'S3 Upload Error: {str(e)}'
                current_app.logger.error(error_msg)
                return err(500, "Failed to upload model to S3"), 500

            if modeltype == "checkpoint":
                try:
                    PredictionService.patch(prediction_id, {
                        "checkpointLink": CONFIG.EnvironmentConfig.ASSET_BUCKET + '/' + key
                    })
                except Exception as e:
                    error_msg = f'SaveLink Error: {str(e)}'
                    current_app.logger.error(error_msg)
                    return err(500, "Failed to save checkpoint state to DB"), 500

            if modeltype == "tfrecord":
                try:
                    PredictionService.patch(prediction_id, {
                        "tfrecordLink": CONFIG.EnvironmentConfig.ASSET_BUCKET + '/' + key
                    })
                except Exception as e:
                    error_msg = f'SaveLink Error: {str(e)}'
                    current_app.logger.error(error_msg)
                    return err(500, "Failed to save checkpoint state to DB"), 500

            if modeltype == "model":
                # Save the model link to ensure UI shows upload success
                try:
                    PredictionService.patch(prediction_id, {
                        "modelLink": CONFIG.EnvironmentConfig.ASSET_BUCKET + '/' + key
                    })
                except Exception as e:
                    error_msg = f'SaveLink Error: {str(e)}'
                    current_app.logger.error(error_msg)
                    return err(500, "Failed to save model state to DB"), 500

                try:
                    batch = boto3.client(
                        service_name='batch',
                        region_name='us-east-1',
                        endpoint_url='https://batch.us-east-1.amazonaws.com'
                    )

                    # Submit to AWS Batch to convert to ECR image
                    batch.submit_job(
                        jobName=CONFIG.EnvironmentConfig.STACK + 'ecr-build',
                        jobQueue=CONFIG.EnvironmentConfig.STACK + '-queue',
                        jobDefinition=CONFIG.EnvironmentConfig.STACK + '-job',
                        containerOverrides={
                            'environment': [{
                                'name': 'MODEL',
                                'value': CONFIG.EnvironmentConfig.ASSET_BUCKET + '/' + key
                            }]
                        }
                    )
                except Exception as e:
                    error_msg = f'Batch Error: {str(e)}'
                    current_app.logger.error(error_msg)
                    return err(500, "Failed to start ECR build"), 500

            return { "status": "model uploaded" }, 200
        else:
            return err(400, "model exists"), 400
Example #19
File: ml.py Project: tossetolab/ml-enabler
    def post(self, prediction_id):
        """
        Submit tile level predictions
        ---
        produces:
            - application/json
        parameters:
            - in: body
              name: body
              required: true
              type: string
              description: JSON object of predictions
              schema:
                properties:
                    predictionId:
                        type: integer
                        description: Prediction ID
                        required: true
                    predictions:
                        type: array
                        items:
                            type: object
                            schema:
                                properties:
                                    quadkey:
                                        type: string
                                        description: quadkey of the tile
                                        required: true
                                    centroid:
                                        type: array
                                        items:
                                            type: float
                                        required: true
                                    predictions:
                                        type: object
                                        schema:
                                            properties:
                                                ml_prediction:
                                                    type: float
        responses:
            200:
                description: ID of the prediction
            400:
                description: Invalid Request
            500:
                description: Internal Server Error
        """
        try:
            prediction_dto = PredictionService.get_prediction_by_id(
                prediction_id)
            data = request.get_json()
            if len(data['predictions']) == 0:
                return {"error": "Error validating request"}, 400

            PredictionTileService.create(prediction_dto, data)

            return {"prediction_id": prediction_id}, 200

        except PredictionsNotFound:
            return {"error": "Prediction not found"}, 404
        except Exception as e:
            error_msg = f'Unhandled error: {str(e)}'
            current_app.logger.error(error_msg)
            return {"error": error_msg}, 500
Example #20
    def post(self, project_id, prediction_id):
        """
        Given a GeoJSON, xyz list, or tile list, submit it to the SQS queue
        ---
        produces:
            - application/json
        responses:
            200:
                description: Status Update
        """

        if CONFIG.EnvironmentConfig.ENVIRONMENT != "aws":
            return err(501, "stack must be in 'aws' mode to use this endpoint"), 501

        payload = request.data

        tiler = tileschemes.WebMercator()

        try:
            prediction = PredictionService.get_prediction_by_id(prediction_id)
            imagery = ImageryService.get(prediction.imagery_id)

            queue_name = "{stack}-models-{model}-prediction-{prediction}-queue".format(
                stack=CONFIG.EnvironmentConfig.STACK,
                model=project_id,
                prediction=prediction_id,
            )

            queue = boto3.resource("sqs").get_queue_by_name(QueueName=queue_name)

            tiles = []
            payloadjson = json.loads(payload)
            if imagery["fmt"] == "wms":
                if type(payloadjson) is list:
                    for tile in payloadjson:
                        tile = tile.split("-")
                        tiles.append(
                            mercantile.Tile(int(tile[0]), int(tile[1]), int(tile[2]))
                        )
                else:

                    poly = shape(geojson.loads(payload))

                    project = partial(
                        pyproj.transform,
                        pyproj.Proj(init="epsg:4326"),
                        pyproj.Proj(init="epsg:3857"),
                    )

                    poly = transform(project, poly)

                    tiles = tilecover.cover_geometry(tiler, poly, prediction.tile_zoom)

                cache = []
                for tile in tiles:
                    cache.append(
                        {
                            "Id": str(tile.z) + "-" + str(tile.x) + "-" + str(tile.y),
                            "MessageBody": json.dumps(
                                {
                                    "name": "{x}-{y}-{z}".format(
                                        x=tile.x, y=tile.y, z=tile.z
                                    ),
                                    "url": imagery["url"].format(
                                        x=tile.x, y=tile.y, z=tile.z
                                    ),
                                    "bounds": mercantile.bounds(tile.x, tile.y, tile.z),
                                    "x": tile.x,
                                    "y": tile.y,
                                    "z": tile.z,
                                }
                            ),
                        }
                    )

                    if len(cache) == 10:
                        queue.send_messages(Entries=cache)

                        cache = []

                if len(cache) > 0:
                    queue.send_messages(Entries=cache)

                return {}, 200
            elif imagery["fmt"] == "list":

                r = requests.get(imagery["url"])
                r.raise_for_status()

                f = StringIO(r.text)
                cache = []
                for row in csv.reader(f, delimiter=","):
                    cache.append(
                        {
                            "Id": row[0],
                            "MessageBody": json.dumps(
                                {
                                    "name": row[0],
                                    "url": row[1],
                                    "bounds": row[2].split(","),
                                }
                            ),
                        }
                    )

                    if len(cache) == 10:
                        queue.send_messages(Entries=cache)
                        cache = []

                if len(cache) > 0:
                    queue.send_messages(Entries=cache)

                return {}, 200

            else:
                return err(400, "Unknown imagery type"), 400
        except Exception as e:
            current_app.logger.error(traceback.format_exc())

            error_msg = f"Prediction Tiler Error: {str(e)}"
            return err(500, error_msg), 500
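For reference, each SQS message produced by the "wms" branch above carries a JSON body of the following shape (all values illustrative; the URL depends on the imagery's template), while the "list" branch sends only name, url, and bounds taken from each chip-list CSV row.

    {
        "name": "1205-1539-12",
        "url": "https://tiles.example.com/12/1205/1539.png",
        "bounds": [-74.00, 40.70, -73.91, 40.78],
        "x": 1205,
        "y": 1539,
        "z": 12
    }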