Example #1
def style_transfer_api(model_handle="fast-style-transfer",
                       style_name="candy") -> Response:
    # check if it's a valid style
    if style_name not in MODEL_REGISTER[model_handle]["model_stack"]:
        return make_response(
            jsonify({
                "error":
                f"{style_name} not in model_stack of {model_handle}"
            }),
            404,
        )

    # get the input image from the request
    returned_val: Union[Response,
                        Image] = get_image_from_request(from_request=request,
                                                        file_key="file")

    # if a response was already created during processing (i.e. an error), return it
    if isinstance(returned_val, Response):
        response: Response = returned_val
        return response

    image: Image = returned_val

    # now process the image
    style_transfer = get_style_transfer_function(model_handle, style_name)
    output: Image = style_transfer(image)

    # convert it to b64 bytes
    b64_image = image2b64(output)
    return make_response(jsonify(b64_image), 200)
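A minimal client sketch for calling this endpoint with the requests library. The route and port below are assumptions, since the URL rule registration is not shown; the "file" field name matches the file_key the handler passes to get_image_from_request, and the response body is the base64 string produced by image2b64 (assumed here to be plain base64 without a data-URI prefix).

import base64
import requests

# Assumed route and port; the actual URL rule for style_transfer_api is
# registered elsewhere in the Flask app.
URL = "http://localhost:5000/style-transfer/fast-style-transfer/candy"

with open("input.jpg", "rb") as f:
    # the handler reads the upload with file_key="file"
    resp = requests.post(URL, files={"file": f})
resp.raise_for_status()

# the endpoint returns jsonify(b64_image), i.e. a base64 string
with open("styled.jpg", "wb") as out:
    out.write(base64.b64decode(resp.json()))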
Example #2
def get_human_pose() -> Response:
    """

    Handles the human pose POST request, takes the pose image and identifies the human pose keypoints,
        stitches them together and returns a response with the image as b64 encoded, with the detected human pose

    Returns:
        (Response): b64 image string with the detected human pose

    """
    from models import get_pose

    if "file" not in request.files:
        return Response({"error": "No file part"}, status=412)

    file: FileStorage = request.files["file"]

    if file.filename == "":
        return make_response(jsonify({"error": "No file selected"}), 417)

    if allowed_file(file.filename):
        image: Image = file2image(file)
        pose_img = get_pose(image)

        # convert it to b64 bytes
        b64_pose = image2b64(pose_img)

        return make_response(jsonify(b64_pose), 200)

    else:
        return make_response(
            jsonify({"error": f"{file.mimetype} not allowed"}), 412)
Example #3
def autoencoder_api(model_handle="red-car-autoencoder") -> Response:
    """

    autoencoder_api

        This end point is used to encode an image and then get the latentz vector as well as
            the reconstructed image, this kind of technique can be used for image compression
            and video compression, but right now only supports images and specific type of input
            data.
        The latentz vector is a unique representation of the input, and thus the latentz given
            to a encoder and reconstruct the image exactly, thus reducing the data transmitted.

    Args:
        model_handle:  the model handle string, must be in the MODEL_REGISTER

    Returns:
        Response: The response is a JSON containing the reconstructed image and the latent z
            vector for the image

    """
    if model_handle not in MODEL_REGISTER:
        return make_response(
            jsonify(
                {"error": f"{model_handle} not found in registered models"}),
            404)

    # model_handle is already known to be in MODEL_REGISTER at this point
    if MODEL_REGISTER[model_handle]["type"] != "variational-autoencoder":
        return make_response(
            jsonify({"error": f"{model_handle} model is not an AutoEncoder"}),
            412)

    if "file" not in request.files:
        return make_response(jsonify({"error": "No file part"}), 412)

    file: FileStorage = request.files["file"]

    if file.filename == "":
        return make_response(jsonify({"error": "No file selected"}), 417)

    if allowed_file(file.filename):
        image: Image = file2image(file)
        autoencoder = get_autoencoder(model_handle)
        output: Image
        latent_z: np.ndarray
        output, latent_z = autoencoder(image)

        # convert it to b64 bytes
        b64_image = image2b64(output)
        return make_response(
            jsonify(dict(recon_image=b64_image, latent_z=latent_z.tolist())),
            200)

    else:
        return make_response(
            jsonify({"error": f"{file.mimetype} not allowed"}), 412)
Example #4
def generator_api(model_handle="red-car-gan-generator") -> Response:
    """
    generator_api

        This is the generator end point, that has the model handle as the parameter
            and takes in the latent_z values in the POST requests, followed by passing this
            vector to the model and generates an image, which is returned as a b64 image in
            the Response
    Args:
        model_handle: the model handle string

    Returns:
        Response: the base 64 encoded generated image

    """
    if model_handle not in MODEL_REGISTER:
        return make_response(
            jsonify(
                {"error": f"{model_handle} not found in registered models"}),
            404)

    # model_handle is already known to be in MODEL_REGISTER at this point
    if MODEL_REGISTER[model_handle]["type"] != "gan-generator":
        return make_response(
            jsonify({"error": f"{model_handle} model is not a GAN"}), 412)

    if "latent_z_size" in MODEL_REGISTER[model_handle]:
        # this is a latent-z-input type of GAN model
        if "latent_z" not in request.form:
            return make_response(
                jsonify({"error": "latent_z not found in the form"}), 412)

        latent_z = json.loads(f"[{request.form['latent_z']}]")
        latent_z = np.array(latent_z, dtype=np.float32)

        generator = get_generator(model_handle)
        output = generator(latent_z)

        # convert it to b64 bytes
        b64_image = image2b64(output)

        return make_response(jsonify(b64_image), 200)

    if "input_shape" in MODEL_REGISTER[model_handle]:
        # this is an image-input type of GAN model

        if "file" not in request.files:
            return make_response(jsonify({"error": "No file part"}), 412)

        file: FileStorage = request.files["file"]

        if file.filename == "":
            return make_response(jsonify({"error": "No file selected"}), 417)

        if allowed_file(file.filename):
            image: Image = file2image(file)
            generator = get_generator(model_handle)
            output = generator(image)

            # convert it to b64 bytes
            b64_image = image2b64(output)

            return make_response(jsonify(b64_image), 200)

        # disallowed file type: report it instead of falling through to the
        # generic "not a valid GAN" error below
        return make_response(
            jsonify({"error": f"{file.mimetype} not allowed"}), 412)

    return make_response(
        jsonify({"error": f"{model_handle} is not a valid GAN"}), 412)