# Example 1 (score: 0)
def best_box(images):
    """For each image, keep only the highest-scoring box of each label.

    Args:
        images: iterable of Image objects whose ``inputBoxes`` carry
            ``label`` and ``score`` attributes.

    Returns:
        list: one result Image per input image; its ``outputBoxes`` hold
        the best box per label, provided that box's score exceeds 0.35.
    """
    result_images = []
    for image in images:
        result_image = Image(image.id)

        # Group boxes by label (setdefault avoids the if/else branch).
        label_dict = {}
        for box in image.inputBoxes:
            label_dict.setdefault(box.label, []).append(box)

        # Emit the top-scoring box of each label if it clears the threshold.
        for boxes in label_dict.values():
            # Renamed from `best_box` — the original shadowed the function name.
            top = max(boxes, key=lambda b: b.score)
            if top.score > 0.35:
                result_image.outputBoxes.append(top)
        result_images.append(result_image)
    return result_images
# Example 2 (score: 0)
def best_cluster_haehn(images):
    """Cluster overlapping boxes around high-score anchors, then fuse each
    cluster into one score-weighted box.

    Args:
        images: iterable of Image objects; each input Box must provide
            ``score``, ``label``, ``iou`` and the ``x1s/y1s/x2s/y2s`` coords.

    Returns:
        list: one result Image per input, with one fused output Box per
        cluster whose anchor score is at least 0.35.
    """
    result_images = []
    for image in images:
        groups = {}
        result_image = Image(image.id)

        inputBoxes_temp = image.inputBoxes.copy()

        # Greedy clustering: repeatedly take the highest-scoring remaining
        # box as an anchor and absorb every box that overlaps it.
        while inputBoxes_temp:
            best_box = max(
                inputBoxes_temp,
                key=lambda box: box.score)  # get a box with best score

            # Stop once even the best remaining score is too low.
            if best_box.score < 0.35:
                break

            inputBoxes_temp.remove(best_box)
            if best_box not in groups:
                # FIX: the anchor must belong to its own cluster; previously
                # its coordinates were excluded from the weighted average,
                # leaving the fused box at (0, 0, 0, 0) whenever no other
                # box overlapped the anchor.
                groups[best_box] = [best_box]

            # Absorb overlapping boxes (IoU > 0.3) with a non-trivial score.
            # FIX: iterate over a copy — the original removed items from the
            # list while iterating it, which silently skips elements.
            for box in inputBoxes_temp.copy():
                if box.iou(best_box) > 0.3 and box.score > 0.1:
                    groups[best_box].append(box)
                    inputBoxes_temp.remove(box)

        # Fuse each cluster into a single score-weighted box.
        for anchor, members in groups.items():
            result_box = Box(anchor.label, 0, 0, 0, 0, anchor.score)

            # Cube the scores so confident boxes dominate the average.
            total_score = sum(box.score**3 for box in members)

            for box in members:
                # Weighted average of the corner coordinates.
                dominance = box.score**3 / total_score
                result_box.x1s += box.x1s * dominance
                result_box.y1s += box.y1s * dominance
                result_box.x2s += box.x2s * dominance
                result_box.y2s += box.y2s * dominance

            result_image.outputBoxes.append(result_box)
        result_images.append(result_image)
    return result_images
# Example 3 (score: 0)
def haehn(images):
    """Fuse all boxes that share a label into one score-weighted box.

    Args:
        images: iterable of Image objects whose ``inputBoxes`` carry
            ``label``, ``score`` and the ``x1s/y1s/x2s/y2s`` coordinates.

    Returns:
        list: one result Image per input; each label whose cubed-score sum
        exceeds 0.1 AND that has at least one box scoring above 0.5 yields
        a single averaged output Box.
    """
    result_images = []
    for image in images:
        result_image = Image(image.id)

        # Group boxes by label (setdefault avoids the if/else branch).
        label_dict = {}
        for box in image.inputBoxes:
            label_dict.setdefault(box.label, []).append(box)

        # Generate one fused output box per label.
        for key, boxes in label_dict.items():

            # Cubed scores emphasise confident boxes in the weighted average.
            total_score = sum(box.score**3 for box in boxes)

            if total_score > 0.1:
                # At least one box must individually clear the 0.5 threshold.
                box_meets_threshhold = any(box.score > 0.5 for box in boxes)
                result_box = Box(key, 0, 0, 0, 0)
                for box in boxes:
                    # Weighted average of the corner coordinates.
                    dominance = box.score**3 / total_score
                    result_box.x1s += box.x1s * dominance
                    result_box.y1s += box.y1s * dominance
                    result_box.x2s += box.x2s * dominance
                    result_box.y2s += box.y2s * dominance

                # FIX: the original kept whichever box happened to come last
                # (flagged "not gonna work well"); report the group's best
                # score instead.
                result_box.score = max(box.score for box in boxes)

                # Store and return results
                if box_meets_threshhold:
                    result_image.outputBoxes.append(result_box)
        result_images.append(result_image)
    return result_images
# Example 4 (score: 0)
    def parse_image_list(self, images_dict):
        """Convert CSV-derived data into a list of Image objects.

        Args:
            images_dict: data formatted as dicts of parallel arrays, e.g.
            ``{"img_001": {"labels": ["tooth_1", "tooth_2"],
                           "scores": [0.5, 0.2]}}``.

        Returns:
            list: one Image per entry; the optional ``img_type`` key, when
            present, is forwarded as the image type (otherwise ``None``).
        """
        images = []
        for image_id, data in images_dict.items():
            parsed_boxes = self.parse_box_list(data)
            # dict.get returns None when the key is absent, matching the
            # original explicit membership check.
            image_type = data.get('img_type')
            images.append(Image(image_id, parsed_boxes, image_type))
        return images
# Example 5 (score: 0)
# Test Box class: every constructor argument must land on its attribute.
test_box = Box("tooth_1", 1, 2, 3, 4, 0.5)
error_message = "InputBox failed a test"
for attr, want in (("label", "tooth_1"), ("x1s", 1), ("y1s", 2),
                   ("x2s", 3), ("y2s", 4), ("score", 0.5)):
    assert getattr(test_box, attr) == want, error_message


# Test Image class: id and every inputBox attribute must round-trip.
boxes = [
    Box("tooth_14", 514, 1075, 438, 121, 0.6),
    Box("tooth_15", 1070, 1568, 457, 256, 0.7888)
]
test_image = Image("img_001", boxes)
error_message = "Image class failed a test"
assert test_image.id == "img_001", error_message
for stored, original in zip(test_image.inputBoxes, boxes):
    for attr in ("label", "x1s", "y1s", "x2s", "y2s", "score"):
        assert getattr(stored, attr) == getattr(original, attr), error_message
# Example 6 (score: 0)
def missing_tooth(images_pred):
    """Relabel predicted tooth boxes to account for gaps (missing teeth).

    Boxes of each image are sorted left-to-right by ``x1s`` and split into
    the upper and lower arches (membership in the module-level sets
    ``upper_teeth`` / ``lower_teeth``). Whenever the horizontal gap between
    consecutive boxes exceeds the combined width of the two neighbouring
    boxes, a tooth is assumed missing and all subsequent labels in that
    arch are shifted — incremented in the upper arch, decremented in the
    lower arch.

    Args:
        images_pred: iterable of Image objects whose ``outputBoxes`` carry
            a ``label`` of the form ``"tooth_<n>"`` plus the
            ``x1s/x2s/y1s/y2s`` coordinates.

    Returns:
        list: new Image objects whose ``outputBoxes`` hold the (possibly
        relabelled) boxes, upper arch first, then lower arch.
    """
    result_images = []
    # Goes through every image in images_pred
    for i, image in enumerate(images_pred):
        result_image = Image(image.id)

        # --- Sort and split boxes into upper and lower arches ------------
        upper_pred = []
        lower_pred = []
        # sort the boxes by x1 value

        image.outputBoxes = sorted(image.outputBoxes, key=lambda box: box.x1s)
        # separate upper and lower teeth in this image
        for box in image.outputBoxes:
            # Extract the numeric part of the label.
            # NOTE(review): str.strip('tooth_') strips *characters* (t, o,
            # h, _), not the prefix — safe only while labels are exactly
            # "tooth_<digits>"; consider removeprefix/split instead.
            label_num = int(box.label.strip('tooth_'))
            # Upper
            if label_num in upper_teeth:
                upper_pred.append(box)
            # Lower
            elif label_num in lower_teeth:
                lower_pred.append(box)
            # Error
            else:
                print('Error sorting teeth in missing_tooth.py: ' +
                      str(label_num))
        # --- End of sort/split -------------------------------------------

        # Upper arch: shift labels *up* past each detected gap.
        if (len(upper_pred) != 0):
            # Seed the "previous box" with the first box's coordinates, so
            # the first iteration never sees a gap.
            oldX1 = upper_pred[0].x1s
            oldX2 = upper_pred[0].x2s
            oldY1 = upper_pred[0].y1s
            oldY2 = upper_pred[0].y2s
            isMissingTeeth = False
            number_of_missing_teeth = 0

            for j, box in enumerate(upper_pred):
                # Gap threshold: previous box width + current box width.
                # NOTE(review): the trailing "/ 1" is a no-op — possibly an
                # average ("/ 2") was intended; confirm before changing.
                teeth_gap = ((oldX2 - oldX1) +
                             (upper_pred[j].x2s - upper_pred[j].x1s)) / 1
                # Once a gap was found, every later label keeps the shift.
                if (isMissingTeeth):
                    label_num = int(
                        box.label.strip('tooth_')) + number_of_missing_teeth
                    upper_pred[j].label = "tooth_" + str(label_num)
                # A horizontal gap wider than the threshold means a tooth
                # is missing: bump the shift and relabel this box too.
                if ((upper_pred[j].x1s - oldX2) > teeth_gap):
                    number_of_missing_teeth += 1
                    isMissingTeeth = True
                    label_num = int(
                        box.label.strip('tooth_')) + number_of_missing_teeth
                    upper_pred[j].label = "tooth_" + str(label_num)
                    # print(image.id)
                    # print(upper_pred[j].label)
                    # print(upper_pred[j].x1s - oldX2)
                oldX1 = upper_pred[j].x1s
                oldX2 = upper_pred[j].x2s
                oldY1 = upper_pred[j].y1s
                oldY2 = upper_pred[j].y2s
                result_image.outputBoxes.append(upper_pred[j])

        # Lower arch: same scan, but labels shift *down* past each gap
        # (lower-arch numbering runs in the opposite direction).
        if (len(lower_pred) != 0):
            oldX1 = lower_pred[0].x1s
            oldX2 = lower_pred[0].x2s
            oldY1 = lower_pred[0].y1s
            oldY2 = lower_pred[0].y2s
            isMissingTeeth = False
            number_of_missing_teeth = 0

            for j, box in enumerate(lower_pred):
                teeth_gap = ((oldX2 - oldX1) +
                             (lower_pred[j].x2s - lower_pred[j].x1s)) / 1
                if (isMissingTeeth):
                    label_num = int(
                        box.label.strip('tooth_')) + number_of_missing_teeth
                    lower_pred[j].label = "tooth_" + str(label_num)
                if ((lower_pred[j].x1s - oldX2) > teeth_gap):
                    number_of_missing_teeth -= 1
                    isMissingTeeth = True
                    label_num = int(
                        box.label.strip('tooth_')) + number_of_missing_teeth
                    lower_pred[j].label = "tooth_" + str(label_num)
                    # print(image.id)
                    # print(lower_pred[j].label)
                    # print(lower_pred[j].x1s - oldX2)
                    # print(lower_pred[j].x1s, oldX2)
                oldX1 = lower_pred[j].x1s
                oldX2 = lower_pred[j].x2s
                oldY1 = lower_pred[j].y1s
                oldY2 = lower_pred[j].y2s
                result_image.outputBoxes.append(lower_pred[j])
        result_images.append(result_image)
    return result_images
# Example 7 (score: 0)
def main():
    """CLI entry point: rebuild full-resolution masks from tiled predictions.

    Reads tile metadata from the ``--resultfile`` CSV (columns: image path,
    scale factor, tile index), loads per-tile predictions from the HDF5
    ``--predictionfile``, reconstructs whole-image masks via
    ``Tools.reconstruct_images`` and writes each mask next to its source
    image with a ``_mask.TIF`` suffix.
    """
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--scale', help='select output folder', default=None)
    parser.add_argument('--resultfile',
                        help='select result file',
                        default=None)
    parser.add_argument('--predictionfile',
                        help='select result file',
                        default=None)
    parser.add_argument('--net', help='describe net', default=None)
    parser.add_argument('--overlap', help='select output folder', default=None)
    parser.add_argument('--dilate', help='select output folder', default=False)
    args = parser.parse_args()

    # NOTE(review): Config is used as a class object, not instantiated —
    # the assignments below mutate shared class-level state.
    config = Config

    if args.scale == '1':
        config.scale = True
    if args.resultfile:
        config.resultfile = args.resultfile
    else:
        # The CSV is mandatory; bail out without it.
        print("No result file provided")
        exit()
    if args.net:
        config.net = args.net
    if args.overlap:
        config.overlap = int(args.overlap)

    path_to_img = []
    tiles = []
    images = []
    scales = []
    scales_new = []
    path_to_save = []
    start = 1
    with open(args.resultfile) as csv_file:
        csv_reader = csv.reader(csv_file)
        for row in csv_reader:
            # row = [image path, scale, tile index]
            path_to_img.append(row[0])
            # Collect each distinct image path once (consecutive rows for
            # the same image share the path).
            if start == 1:
                path_to_save.append(row[0])
                start = 0
            else:
                if path_to_save[-1] != row[0]:
                    path_to_save.append(row[0])
            scales.append(float(row[1]))
            tiles.append(int(row[2]))

    print(config.scale)
    tools = Tools()
    print("Loading predictions ...")
    predictions = h5py.File(args.predictionfile, 'r')['predictions']

    tile_ind = 0
    for i in range(0, tiles.__len__()):
        # Tile index 0 marks the start of a new image — presumably one
        # source image per tile group; confirm against the CSV writer.
        if (tiles[i] == 0):
            # TIFF files go through imread, everything else through OpenCV.
            if (os.path.basename(path_to_img[i]).split('.')[1]
                    == 'tif') or (os.path.basename(
                        path_to_img[i]).split('.')[1] == 'TIF'):
                img = imread(path_to_img[i])
            else:
                img = cv2.imread(path_to_img[i])
            images.append(Image.pre_process_img(img, color='gray'))
            scales_new.append(scales[i])
    # Create and save the reconstructed images
    print("Reconstruct images ...")
    reconstructed_predictions, reconstructed_masks = tools.reconstruct_images(
        images=images,
        predictions=predictions,
        scales=scales_new,
        rescale=config.scale,
        overlap=config.overlap,
        config=config,
        label_output=True,
        dilate_objects=int(args.dilate))
    for index, i in enumerate(reconstructed_masks):
        print(path_to_save[index].replace('.TIF', '_mask.TIF'))
        # Save each mask beside its source image, swapping the original
        # extension for "_mask.TIF".
        print(path_to_save[index].replace(
            '.' + os.path.basename(path_to_save[index]).split('.')[1],
            '_mask.TIF'))
        imsave(
            path_to_save[index].replace(
                '.' + os.path.basename(path_to_save[index]).split('.')[1],
                '_mask.TIF'), i)
def main():
    """CLI entry point: reconstruct predictions/masks and pickle them.

    Variant of the reconstruction script above (NOTE(review): this second
    ``def main()`` redefines the previous one; only this definition is
    visible at runtime). Reads tile metadata from the ``--resultfile`` CSV,
    loads HDF5 predictions, reconstructs whole-image outputs and dumps
    ``{"masks": ..., "predictions": ...}`` to a ``*_reconstructed.pkl``
    file next to the prediction file.
    """
    parser = argparse.ArgumentParser(description='Train model.')
    parser.add_argument('--scale', help='select output folder', default=None)
    parser.add_argument('--resultfile',
                        help='select result file',
                        default=None)
    parser.add_argument('--predictionfile',
                        help='select result file',
                        default=None)
    parser.add_argument('--net', help='describe net', default=None)
    parser.add_argument('--overlap', help='select output folder', default=None)
    args = parser.parse_args()
    tisquant = TisQuantExtract()
    # NOTE(review): Config is used as a class object, not instantiated —
    # the assignments below mutate shared class-level state.
    config = Config

    if args.scale == '1':
        config.scale = True
    if args.resultfile:
        config.resultfile = args.resultfile
    else:
        # The CSV is mandatory; bail out without it.
        print("No result file provided")
        exit()
    if args.net:
        config.net = args.net
    if args.overlap:
        config.overlap = int(args.overlap)

    path_to_img = []
    tiles = []
    images = []
    scales = []
    scales_new = []
    with open(args.resultfile) as csv_file:
        csv_reader = csv.reader(csv_file)
        for row in csv_reader:
            # row = [image path, scale, tile index]
            path_to_img.append(row[0])
            scales.append(float(row[1]))
            tiles.append(int(row[2]))

    print(config.scale)
    tools = Tools()

    predictions = h5py.File(args.predictionfile, 'r')['predictions']

    tile_ind = 0
    for i in range(0, tiles.__len__()):
        # Tile index 0 marks the start of a new image — presumably one
        # source image per tile group; confirm against the CSV writer.
        if (tiles[i] == 0):
            images.append(
                Image.pre_process_img(imread(path_to_img[i]), color='gray'))
            scales_new.append(scales[i])
    # Create and save the reconstructed images
    reconstructed_predictions, reconstructed_masks = tools.reconstruct_images(
        images=images,
        predictions=predictions,
        scales=scales_new,
        rescale=config.scale,
        overlap=config.overlap,
        config=config)
    # Persist both outputs as a pickle next to the prediction file,
    # swapping the ".h5" suffix for "_reconstructed.pkl".
    pickle.dump(({
        "masks": reconstructed_masks,
        "predictions": reconstructed_predictions
    }),
                open(
                    os.path.join(
                        os.path.dirname(args.predictionfile),
                        os.path.basename(args.predictionfile).replace(
                            '.h5', '_reconstructed.pkl')), "wb"))