Example #1
def face_swap_api():

    if 'files' not in request.files:
        return jsonify({'error': 'No files selected'}), 412

    files = request.files.getlist('files')

    if len(files) != 2:
        return jsonify({'error': 'Select two faces (images)'}), 412

    face_one = files[0]
    face_two = files[1]

    if allowed_file(face_one.filename) and allowed_file(face_two.filename):
        f1_image = file2image(face_one)
        f2_image = file2image(face_two)

        logger.info('Running FaceSwap')
        swapped_face = swap_faces(f1_image, f2_image)

        # convert it to bytes
        b64_image = image2b64(swapped_face)

        return jsonify(b64_image), 200

    else:
        return jsonify({'error': f'{face_one.mimetype} not allowed'}), 412
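
A minimal client sketch for this endpoint. The route name and host are assumptions, as is the format of the base64 string produced by image2b64:

# hypothetical usage sketch -- route, host, and b64 format are assumptions
import base64

import requests

URL = "http://localhost:5000/face-swap"  # assumed route

with open("face_one.jpg", "rb") as f1, open("face_two.jpg", "rb") as f2:
    # both images go under the same 'files' field, as the endpoint expects
    resp = requests.post(URL, files=[("files", f1), ("files", f2)])

if resp.status_code == 200:
    # the body is a JSON string holding the swapped face as base64
    with open("swapped.jpg", "wb") as out:
        out.write(base64.b64decode(resp.json()))
else:
    print(resp.status_code, resp.json())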
Example #2
def classify_image_api(model_handle="resnet34-imagenet") -> Response:
    """

    Args:
        model_handle: the model handle string, should be in `models.model_handler.MODEL_REGISTER`

    Returns:
        (Response): if error then a json of {'error': 'message'} is sent
                    else return a json of sorted List[Dict[{'class_idx': idx, 'class_name': cn, 'confidence': 'c'}]]
    """
    if model_handle not in MODEL_REGISTER:
        return make_response(
            jsonify({"error": f"{model_handle} not found in registered models"}),
            404)

    if "file" not in request.files:
        return make_response(jsonify({"error": "No file part"}), 412)

    file: FileStorage = request.files["file"]

    if file.filename == "":
        return make_response(jsonify({"error": "No file selected"}), 417)

    if allowed_file(file.filename):
        image: Image = file2image(file)
        classifier = get_classifier(model_handle)
        output = classifier(image)
        return Response(json.dumps(output), status=200,
                        mimetype="application/json")

    else:
        return make_response(
            jsonify({"error": f"{file.mimetype} not allowed"}), 412)
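
A sketch of how a client might call this endpoint, assuming a hypothetical /classify/<model_handle> route; the response keys below come from the docstring above:

# hypothetical usage sketch -- route and host are assumptions
import requests

URL = "http://localhost:5000/classify/resnet34-imagenet"  # assumed route

with open("dog.jpg", "rb") as f:
    resp = requests.post(URL, files={"file": f})

if resp.status_code == 200:
    # a sorted list of {'class_idx', 'class_name', 'confidence'} entries
    for pred in resp.json()[:5]:
        print(pred["class_name"], pred["confidence"])
else:
    print(resp.status_code, resp.json())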
Example #3
def get_human_pose() -> Response:
    """

    Handles the human pose POST request, takes the pose image and identifies the human pose keypoints,
        stitches them together and returns a response with the image as b64 encoded, with the detected human pose

    Returns:
        (Response): b64 image string with the detected human pose

    """
    from models import get_pose

    if "file" not in request.files:
        return jsonify({"error": "No file part"}), 412

    file: FileStorage = request.files["file"]

    if file.filename == "":
        return jsonify({"error": "No file selected"}), 417

    if allowed_file(file.filename):
        image: Image = file2image(file)
        pose_img = get_pose(image)

        # convert it to b64 bytes
        b64_pose = image2b64(pose_img)

        return jsonify(b64_pose), 200

    else:
        return jsonify({"error": f"{file.mimetype} not allowed"}), 412
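
A client sketch for the pose endpoint, assuming a hypothetical /human-pose route and that the returned string is plain base64 without a data-URI prefix:

# hypothetical usage sketch -- route, host, and b64 format are assumptions
import base64

import requests

URL = "http://localhost:5000/human-pose"  # assumed route

with open("person.jpg", "rb") as f:
    resp = requests.post(URL, files={"file": f})

if resp.status_code == 200:
    # save the pose-annotated image returned by the endpoint
    with open("pose.jpg", "wb") as out:
        out.write(base64.b64decode(resp.json()))
else:
    print(resp.status_code, resp.json())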
Example #4
def autoencoder_api(model_handle="red-car-autoencoder") -> Response:
    """

    autoencoder_api

        This end point is used to encode an image and then get the latentz vector as well as
            the reconstructed image, this kind of technique can be used for image compression
            and video compression, but right now only supports images and specific type of input
            data.
        The latentz vector is a unique representation of the input, and thus the latentz given
            to a encoder and reconstruct the image exactly, thus reducing the data transmitted.

    Args:
        model_handle:  the model handle string, must be in the MODEL_REGISTER

    Returns:
        Response: The response is a JSON containing the reconstructed image and the latent z
            vector for the image

    """
    if model_handle not in MODEL_REGISTER:
        return make_response(
            jsonify(
                {"error": f"{model_handle} not found in registered models"}),
            404)

    if MODEL_REGISTER[model_handle]["type"] != "variational-autoencoder":
        return make_response(
            jsonify({"error": f"{model_handle} model is not an AutoEncoder"}),
            412)

    if "file" not in request.files:
        return make_response(jsonify({"error": "No file part"}), 412)

    file: FileStorage = request.files["file"]

    if file.filename == "":
        return make_response(jsonify({"error": "No file selected"}), 417)

    if allowed_file(file.filename):
        image: Image = file2image(file)
        autoencoder = get_autoencoder(model_handle)
        output: Image
        latent_z: np.ndarray
        output, latent_z = autoencoder(image)

        # convert it to b64 bytes
        b64_image = image2b64(output)
        return make_response(
            jsonify(dict(recon_image=b64_image, latent_z=latent_z.tolist())),
            200)

    else:
        return make_response(
            jsonify({"error": f"{file.mimetype} not allowed"}), 412)
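
A client sketch, assuming a hypothetical /autoencode/<model_handle> route; the 'recon_image' and 'latent_z' keys come from the response built above:

# hypothetical usage sketch -- route and host are assumptions
import base64

import requests

URL = "http://localhost:5000/autoencode/red-car-autoencoder"  # assumed route

with open("car.jpg", "rb") as f:
    resp = requests.post(URL, files={"file": f})

if resp.status_code == 200:
    body = resp.json()
    print("latent_z length:", len(body["latent_z"]))
    # save the reconstructed image
    with open("recon.jpg", "wb") as out:
        out.write(base64.b64decode(body["recon_image"]))
else:
    print(resp.status_code, resp.json())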
Example #5
def get_image_from_request(from_request: Request,
                           file_key: str) -> Union[Response, Image]:
    file: FileStorage = from_request.files[file_key]

    if file.filename == "":
        return make_response(jsonify({"error": "No file selected"}), 417)

    if allowed_file(file.filename):
        image: Image = file2image(file)
        return image

    else:
        return make_response(
            jsonify({"error": f"{file.mimetype} not allowed"}), 412)
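
A sketch of how a route might use this helper; because of the Union return type, callers have to check whether they got an error Response or an Image. The route name, the `app` object, and run_style_transfer are assumptions:

# hypothetical usage sketch -- `app` and run_style_transfer are assumptions
from flask import Response, jsonify, make_response, request


@app.route("/style-transfer", methods=["POST"])  # assumed route
def style_transfer_api() -> Response:
    result = get_image_from_request(request, "file")
    if isinstance(result, Response):
        # the helper already built an error response; return it unchanged
        return result
    # otherwise `result` is the decoded image
    styled = run_style_transfer(result)  # hypothetical downstream model call
    return make_response(jsonify(image2b64(styled)), 200)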
Example #6
def inference():
    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 412

    file: FileStorage = request.files['file']

    if file.filename == '':
        return jsonify({'error': 'No file selected'}), 417

    if allowed_file(file.filename):
        image = file2image(file)
        predictions: array = detect_apparel(image)
        apparels = fetch_apparel(
            image,
            predictions)  # returns key-value pairs of the apparels found
        # output = { CLASSES[idx] : image2b64(apparel) for idx, apparel in enumerate(apparels)}
        return jsonify(apparels), 200

    else:
        return jsonify({'error': f'{file.mimetype} not allowed'}), 412
Example #7
def classify_image_api(model_handle='resnet34-imagenet'):
    if model_handle not in MODEL_REGISTER:
        return jsonify({'error': f'{model_handle} not found in registered models'}), 404

    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 412

    file: FileStorage = request.files['file']

    if file.filename == '':
        return jsonify({'error': 'No file selected'}), 417

    if allowed_file(file.filename):
        image = file2image(file)
        classifier = get_classifier(model_handle)
        output = classifier(image)
        return Response(json.dumps(output), status=200, mimetype='application/json')

    else:
        return jsonify({'error': f'{file.mimetype} not allowed'}), 412
Example #8
def inference():
    if request.method == 'GET':
        return render_template('inference.html', current_page='inference')

    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 412

    file: FileStorage = request.files['file']

    if file.filename == '':
        return jsonify({'error': 'No file selected'}), 417

    if allowed_file(file.filename):
        image = file2image(file)
        predictions: array = detect_apparel(image)
        apparels = fetch_apparel(
            image,
            predictions)  # returns key-value pairs of the apparels found
        # output = { CLASSES[idx] : image2b64(apparel) for idx, apparel in enumerate(apparels)}
        # TODO: render and display a separate card/container for each detected apparel
        return jsonify(apparels), 200

    else:
        return jsonify({'error': f'{file.mimetype} not allowed'}), 412
Example #9
def face_align_api():
    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 412

    file: FileStorage = request.files['file']

    if file.filename == '':
        return jsonify({'error': 'No file selected'}), 417

    if allowed_file(file.filename):
        image = file2image(file)

        logger.info(f'Running Align Face on {file.filename}')

        # align the face
        aligned_face = align_face(image)

        # convert it to bytes
        b64_image = image2b64(aligned_face)

        return jsonify(b64_image), 200

    else:
        return jsonify({'error': f'{file.mimetype} not allowed'}), 412
Example #10
def generator_api(model_handle="red-car-gan-generator") -> Response:
    """
    generator_api

        This is the generator end point, that has the model handle as the parameter
            and takes in the latent_z values in the POST requests, followed by passing this
            vector to the model and generates an image, which is returned as a b64 image in
            the Response
    Args:
        model_handle: the model handle string

    Returns:
        Response: the base 64 encoded generated image

    """
    if model_handle not in MODEL_REGISTER:
        return make_response(
            jsonify(
                {"error": f"{model_handle} not found in registered models"}),
            404)

    if MODEL_REGISTER[model_handle]["type"] != "gan-generator":
        return make_response(
            jsonify({"error": f"{model_handle} model is not a GAN"}), 412)

    if "latent_z_size" in MODEL_REGISTER[model_handle]:
        # this model takes a latent-z vector as its input
        if "latent_z" not in request.form:
            return make_response(
                jsonify({"error": "latent_z not found in the form"}), 412)

        latent_z = json.loads(f"[{request.form['latent_z']}]")
        latent_z = np.array(latent_z, dtype=np.float32)

        generator = get_generator(model_handle)
        output = generator(latent_z)

        # convert it to b64 bytes
        b64_image = image2b64(output)

        return make_response(jsonify(b64_image), 200)

    if "input_shape" in MODEL_REGISTER[model_handle]:
        # this model takes an image as its input

        if "file" not in request.files:
            return make_response(jsonify({"error": "No file part"}), 412)

        file: FileStorage = request.files["file"]

        if file.filename == "":
            return make_response(jsonify({"error": "No file selected"}), 417)

        if allowed_file(file.filename):
            image: Image = file2image(file)
            generator = get_generator(model_handle)
            output = generator(image)

            # convert it to b64 bytes
            b64_image = image2b64(output)

            return make_response(jsonify(b64_image), 200)

    return make_response(
        jsonify({"error": f"{model_handle} is not a valid GAN"}), 412)
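
A client sketch for the latent-z flavour of this endpoint, assuming a hypothetical /generate/<model_handle> route; the latent_z form field is a comma-separated list of floats, which the endpoint wraps in brackets and parses with json.loads:

# hypothetical usage sketch -- route, host, and latent size are assumptions
import base64

import numpy as np
import requests

URL = "http://localhost:5000/generate/red-car-gan-generator"  # assumed route
LATENT_Z_SIZE = 128  # assumed; the real value lives in MODEL_REGISTER

# sample a random latent vector and send it as a comma-separated form field
latent_z = np.random.randn(LATENT_Z_SIZE).astype(np.float32)
resp = requests.post(URL, data={"latent_z": ",".join(str(v) for v in latent_z)})

if resp.status_code == 200:
    # the body is a JSON string holding the generated image as base64
    with open("generated.jpg", "wb") as out:
        out.write(base64.b64decode(resp.json()))
else:
    print(resp.status_code, resp.json())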