Exemplo n.º 1
0
def route_api_query_image():
    """
    API endpoint to query an image.
    This is the 'main' API endpoint for the application.

    Reads two pieces of data from the multipart request:
        "file": the binary image
        "data": a JSON payload of other request data,
            which is just the lesion point clicked on by the user

    Returns:
        JSON response containing:
            the probabilities from the three classifier models
                (probability2 is null when the classifier yields NaN)
            the filtered and segmented image (base64-encoded string)
            the extracted patch (base64-encoded string)
            a list of similar images, annotated with labels
    """
    # Reject the request unless it carries valid credentials.
    if not auth_check(request.authorization):
        return auth_fail()
    request_data = json.loads(request.form["data"])
    f = request.files["file"]
    # PIL decodes to RGB; OpenCV expects BGR, so convert before any cv2 work.
    pil_img = Image.open(f)
    img = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
    filtered_img = preprocess.preprocess(img)
    filtered_img = g_data["segmenter"].segmenter(filtered_img)

    # Map the clicked point from original-image coordinates into the
    # filtered/segmented image's coordinate space before cutting the patch.
    translated_patch_coordinates = preprocess.translate_patch_coordinates(
        filtered_img, request_data["point"])
    patch = util.extract_patch(
        filtered_img,
        (translated_patch_coordinates["y"], translated_patch_coordinates["x"]),
        preprocess.PATCH_SIZE)
    patch = preprocess.preprocess(patch)

    # NOTE(review): filtered_img is preprocessed a second time here (it was
    # already preprocessed before segmentation) — presumably intentional to
    # normalise the segmenter output for the classifiers; confirm.
    filtered_img = preprocess.preprocess(filtered_img)
    prob1 = g_data["classifier1"].classify(filtered_img)
    prob2 = g_data["classifier2"].classify(patch)
    prob3 = g_data["classifier3"].classify(filtered_img)
    # NaN is not representable in JSON; send null instead.
    if math.isnan(prob2):
        prob2 = None

    similarities = g_data["hash_similarity"].query_image(patch)
    similarities = g_data["labels"].add_labels_to_similarity_list(similarities)

    # img_to_base64 returns bytes; decode so jsonify emits plain strings.
    img_b64 = img_to_base64(filtered_img)
    patch_b64 = img_to_base64(patch)
    res = {
        "filtered_img": img_b64.decode("utf-8"),
        "patch": patch_b64.decode("utf-8"),
        "probability1": prob1,
        "probability2": prob2,
        "probability3": prob3,
        "similar_images": similarities,
    }
    return jsonify(res)
Exemplo n.º 2
0
def main():
    """
    Batch-segment every image in a directory with the lung segmentation model.

    Each input image is preprocessed, run through the segmenter, scaled to an
    8-bit grayscale mask, and written to the output directory under the same
    filename.

    CLI Args:
        1: the path to the lung segmentation model file
        2: the path to the directory with all the images that will be segmented
        3: the path to the output directory where all the segmented images
           will be written with the same filename
    """
    model_file = sys.argv[1]
    image_dir = sys.argv[2]
    out_dir = sys.argv[3]

    segmenter = Segmenter(model_file, tf.get_default_graph())

    for filename in os.listdir(image_dir):
        source = Image.open(opj(image_dir, filename))
        mask = segmenter.segmenter(preprocess.preprocess(np.array(source)))
        # Segmenter output is in [0, 1]; scale to 8-bit grayscale for saving.
        out_img = Image.fromarray(np.uint8(mask * 255)).convert("L")
        out_img.save(opj(out_dir, filename))
Exemplo n.º 3
0
def main():
    """
    Evaluate the VGG16-based classifier on a labelled test set.

    Writes a results.csv into the output directory; that file is consumed by
    the test-model-stats.py script to produce results graphs and statistics.

    CLI Args:
        1: the labels file for the test set of subjects
        2: the directory of PNG image slices
        3: the output directory for the results
        4: the classifier model file
        5: the lung segmentation model file
    """
    test_labels = sys.argv[1]
    image_dir = sys.argv[2]
    out_dir = sys.argv[3]
    model_file = sys.argv[4]
    seg_model_file = sys.argv[5]

    graph = tf.get_default_graph()
    segmenter = model.Segmenter(seg_model_file, graph)
    classifier = model.Classifier2(model_file, graph)

    rows = []
    with open(test_labels) as labels_file:
        reader = csv.reader(labels_file)
        header = next(reader) + ["Prediction"]

        for row in reader:
            # Column 0 is the slice filename stem; load, segment, classify.
            image = Image.open(opj(image_dir, row[0] + ".png"))
            image = preprocess.preprocess(np.array(image))
            image = preprocess.preprocess(segmenter.segmenter(image))
            rows.append(row + [classifier.classify(image)])

    with open(opj(out_dir, "results.csv"), "w") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(header)
        writer.writerows(rows)