def get_nuc_cyto_ratios(db_imgs, marker_imgs, cyto_size, params):
    """
    Take nuclear-stained images, calculate the region props to find each cell nucleus,
    then dilate around each nucleus to find the cytoplasmic area. Map this back onto the
    original image and record the ratio of the average intensities of each area.
    :param db_imgs: DeepBlue images for segmentation etc.
    :param marker_imgs: marker-stained images whose intensities are measured
    :param cyto_size: number of dilation iterations used to grow the cytoplasmic area
    :param params: dict of segmentation params
    :return: list of nuclear/cytoplasmic mean-intensity ratios, one per nucleus
    """
    nuc_cyto_ratios = []
    for i in range(len(db_imgs)):
        print(i, 'out of', len(db_imgs))
        seg_img = segment_image(db_imgs[i], params)
        regions = regionprops(seg_img)
        for region in regions:
            # blank mask holding one nuclear region of the image
            nuc = np.zeros(seg_img.shape, dtype=np.uint8)
            nuc[tuple(region.coords.T)] = 1

            # dilate to get the cytoplasmic area
            kernel = np.ones((5, 5), np.uint8)
            cyto = cv2.dilate(nuc, kernel, iterations=cyto_size)

            # index the marker image with boolean masks and record the ratio of means
            avg_cyto = np.mean(marker_imgs[i][cyto.astype(bool)])
            avg_nuc = np.mean(marker_imgs[i][nuc.astype(bool)])
            nuc_cyto_ratios.append(avg_nuc / avg_cyto)

    return nuc_cyto_ratios
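
A minimal usage sketch with synthetic images; the params keys below are hypothetical, since the parameters segment_image expects are not shown in this excerpt:

import numpy as np

# hypothetical inputs: three 512x512 nuclear-stain / marker image pairs
db_imgs = [np.random.randint(0, 255, (512, 512), dtype=np.uint8) for _ in range(3)]
marker_imgs = [np.random.randint(0, 255, (512, 512), dtype=np.uint8) for _ in range(3)]
params = {'threshold': 0.5, 'smallest_object': 100}  # hypothetical keys

ratios = get_nuc_cyto_ratios(db_imgs, marker_imgs, cyto_size=3, params=params)
print('median nuc/cyto ratio:', np.median(ratios))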
Example #2
def translate_page(img,
                   binary_threshold=defaults.BINARY_THRESHOLD,
                   boxes=False,
                   target_lang="en"):
    gray = clean.grayscale(img)

    inv_binary = cv2.bitwise_not(
        clean.binarize(gray, threshold=binary_threshold))
    binary = clean.binarize(gray, threshold=binary_threshold)

    segmented_image = seg.segment_image(gray)
    segmented_image = segmented_image[:, :, 2]

    components = get_connected_components(segmented_image)

    for component in components:
        speech = img[component]
        if speech.shape[0] <= 0 or speech.shape[1] <= 0:
            continue
        translation = helper.ocr(speech, target_lang)
        if len(translation) > 0:
            # print("added component", translation)
            white_out_text(img, component)
            add_text(img, component, translation)
    if boxes:
        cc.draw_bounding_boxes(img, components, color=(255, 0, 0), line_size=2)
    return img
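
A hypothetical invocation, assuming the clean/seg/helper modules used above are importable:

import cv2

img = cv2.imread('page.png')  # hypothetical input path
result = translate_page(img, boxes=True, target_lang="en")
cv2.imwrite('page_translated.png', result)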
Example #3
def main(image_name):
    image = cv2.imread(image_name)
    total_pieces, method, piece_type = extract_parameters(image_name, image)
    all_array = segment_image(image, total_pieces, method, piece_type)
    # print_array(all_array)
    border_types = search_borders(all_array)
    array_c, array_e, array_m, array_c_types, array_e_types, array_m_types = sort_images(
        all_array, border_types)
    print("containers with borders")
    # print_array(array_c)
    print(array_c_types)
    print("--------")
    # print_array(array_e)
    print(array_e_types)
    print("----------")
    # print_array(array_m)
    print(array_m_types)
    print("----------")
    result_array = solve_puzzle(total_pieces, method, array_c, array_e,
                                array_m, array_c_types, array_e_types,
                                array_m_types)
    # print_array(result_array)
    length, width = get_smallest_shape_scrambled(result_array)
    final_image = show_puzzle(total_pieces, length, width, result_array)
    return final_image
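
A hypothetical call; extract_parameters is assumed to derive the piece count, method, and piece type from the file name:

solved = main('puzzle.jpg')  # hypothetical file name
cv2.imwrite('puzzle_solved.jpg', solved)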
Example #4
def optimize_params(img, params, epochs):
    """Greedy per-parameter search: nudge each segmentation parameter up and
    down by 1% and keep whichever value yields more labeled objects."""
    for i in range(epochs):
        for key in params:
            if key == 'smallest_object':
                continue
            original = params[key]
            # candidate values 1% above and 1% below the current setting
            candidates = [original * 1.01, original * 0.99]
            counts = []
            for value in candidates:
                params[key] = value
                seg_img = segment_image(img, params)
                counts.append(np.max(seg_img))  # max label id = object count
            params[key] = candidates[counts.index(max(counts))]
            print(i, max(counts))
        print(params)
        plt.imshow(seg_img)
        plt.show()
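
A hypothetical tuning run; the parameter names below are placeholders, since the keys segment_image expects are not shown in this excerpt:

params = {'threshold': 0.5, 'sigma': 2.0, 'smallest_object': 100}  # hypothetical keys
optimize_params(img, params, epochs=5)
print('tuned params:', params)  # params is mutated in place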
Example #5
def segment(indir, outdir, extension):
    '''
    Segment the kernels in the image or images

    params
    * indir:     directory with images to segment
    * outdir:    directory to output images (will create if it doesn't exist)
    * extension: file extension of the images to segment
    '''
    sp.run(['mkdir', '-p', outdir])
    PATTERN = get_image_regex_pattern()
    for image_path in os.listdir(indir):
        if not PATTERN.match(image_path): continue
        try:
            image = imread(osp.join(indir, image_path))
            seg_image = segment_image(image)
            out_fname = create_name_from_path(image_path, out_dir=outdir)
            imsave(out_fname, seg_image)
        except Exception as e:
            log.error('Failed to process ' + osp.basename(image_path))
            log.error(e)
            tb.print_exc()
            continue
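
A hypothetical invocation with placeholder directories:

segment('raw_images', 'segmented_images', 'png')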
Example #6
def populate_annotations():
    """
    TODO

    1. for each row in charts table
    2. get the image
    3. update x, y, height, width columns in annotations table
    """
    vgg = _cache_model(variables["model_path"])
    df = gdata.filter(variables["COARSE_LABELS"], table="charts")
    for _, row in df.iterrows():
        # the image column is assumed to hold a base64 data URI,
        # hence the split on ',' and the decode before reading
        x = imread(BytesIO(b64decode(row["image"].split(",")[1])))
        pred, mask = seg.segment_image(x,
                                       vgg,
                                       blocksize=(224, 224),
                                       plot=False)
        labeled = label(mask.astype(bool))
        for region in regionprops(labeled):
            minrow, mincol, maxrow, maxcol = region.bbox
Example #7
    if not os.path.isfile(infile):
        print(
            'Please provide a regular existing input file. Use -h option for help.'
        )
        sys.exit(-1)
    img = cv2.imread(infile)
    gray = clean.grayscale(img)

    binary_threshold = arg.integer_value(
        'binary_threshold', default_value=defaults.BINARY_THRESHOLD)
    if arg.boolean_value('verbose'):
        print('Binarizing with threshold value of ' + str(binary_threshold))
    inv_binary = cv2.bitwise_not(
        clean.binarize(gray, threshold=binary_threshold))
    binary = clean.binarize(gray, threshold=binary_threshold)

    segmented_image = seg.segment_image(gray)
    segmented_image = segmented_image[:, :, 2]
    components = cc.get_connected_components(segmented_image)
    cc.draw_bounding_boxes(img, components, color=(255, 0, 0), line_size=2)

    imsave(outfile, img)

    if arg.boolean_value('display'):
        cv2.imshow('segmented_image', segmented_image)

        cv2.waitKey(0)
        cv2.destroyAllWindows()
Example #8
def dataset(indir, outdir, anno_file, validation_split, segment):
    '''
    Generate a dataset for CNN training

    params
    * indir:            directory of preprocessed images
    * outdir:           directory to output the images to
    * anno_file:        the path to the file of annotations
    * validation_split: fraction of the data to hold out for validation
    * segment:          whether to segment each image before cropping thumbnails
    '''

    sp.run(['mkdir', '-p', outdir])

    fh = logging.FileHandler(osp.join(outdir, 'log'))
    fh.setLevel(logging.WARNING)
    log.addHandler(fh)

    data = osp.join(outdir, 'data')

    sp.run(['mkdir', '-p', data])
    sp.run(['mkdir', '-p', osp.join(data, 'train')])
    # sp.run(['mkdir', '-p', osp.join(data, 'valid')]) # handled by split_val

    for i in range(1, 6):
        sp.run(['mkdir', '-p', osp.join(data, 'train', str(i))])

    bbox_dir = osp.join(outdir, 'bboxes')
    bbox_err_dir = osp.join(outdir, 'err_bboxes')
    sp.run(['mkdir', '-p', bbox_dir])
    sp.run(['mkdir', '-p', bbox_err_dir])

    # the annotations.csv header is written when annotation_file is opened below
    bbox_err_count = 0

    try:
        annotations = pd.read_csv(anno_file)
        # convert the ratings string to a list; ast.literal_eval avoids
        # executing arbitrary code (requires `import ast`)
        annotations['ratings'] = annotations['ratings'].apply(ast.literal_eval)

    except FileNotFoundError as fnfe:
        log.error(fnfe)
        exit()

    annotation_file = open(osp.join(outdir, 'annotations.csv'), 'a')
    annotation_file.write("filename,rating\n")
    annotations_summary = {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0}

    for i, row in annotations.iterrows():
        log.info("Processing " + row['filename'])
        image_path = osp.join(indir, row['filename'])
        if not osp.isfile(image_path):
            log.error("Could not locate " + str(image_path))
            continue
        try:
            image = imread(image_path)
            if is_gray(image):
                image = gray2rgb(image)
        except Exception as e:
            log.error("Failed to load " + image_path)
            log.error(e)
            tb.print_exc()
            continue

        bboxes = get_sorted_bboxes(image)
        plot_bbx(image, bboxes)

        if len(bboxes) != row['num_objects']:
            log.error("Count of objects in image did not match: " +
                      row['filename'])
            out_fname = osp.join(bbox_err_dir, row['filename'])
            bbox_err_count += 1
            plt.savefig(out_fname)
            plt.close('all')
            continue
        else:
            out_fname = osp.join(bbox_dir, row['filename'])
            plt.savefig(out_fname)
            plt.close('all')

        # squash 2d list to 1d
        ratings = [entry for line in row['ratings'] for entry in line]
        if segment:
            image = segment_image(image)

        log.info("Getting thumbnails")
        for j, bbox in enumerate(bboxes):
            anno = ratings[j]
            minr, minc, maxr, maxc = bbox
            thumbnail = image[minr:maxr, minc:maxc]

            out_fname = osp.join(outdir, 'data', 'train', str(anno),
                                 str(j) + "_" + row['filename'])
            imsave(out_fname, thumbnail)
            annotation_file.write("{},{}\n".format(
                str(j) + "_" + row['filename'], anno))
            annotations_summary[anno] += 1

    annotation_file.close()
    log.info("Generating validation dataset")
    split_val(data, validation_split)

    log.info("SUMMARY:")
    log.info("Number of bbox errors: " + str(bbox_err_count))
    for i in range(1, 6):
        log.info("Number of annotations with rating {}: {}".format(
            i, annotations_summary[str(i)]))

    num_samples = sum(list(annotations_summary.values()))
    log.info("Total number of images: {}".format(num_samples))

    return
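
A hypothetical call with placeholder paths:

dataset('preprocessed', 'dataset_out', 'annotations.csv',
        validation_split=0.2, segment=True)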
Example #9
    binary_threshold = arg.integer_value('binary_threshold',
                                         default_value=defaults.BINARY_THRESHOLD)
    if arg.boolean_value('verbose'):
        print('Binarizing with threshold value of ' + str(binary_threshold))

    inv_binary = cv2.bitwise_not(clean.binarize(gray, threshold=binary_threshold))
    # cv2.imshow('inv_binary', inv_binary)

    binary = clean.binarize(gray, threshold=binary_threshold)
    # cv2.imshow('binary', binary)

    segmented_image = seg.segment_image(gray)
    cv2.imshow('segmented_image', segmented_image)

    segmented_image = segmented_image[:, :, 2]
    components = cc.get_connected_components(segmented_image)

    cc.draw_bounding_boxes(img, components, color=(255, 0, 0), line_size=2)

    outfile = 'img/no_equalizeHist.png'
    # imsave(outfile, img)

    cv2.imshow('result', img)
    # cv2.imshow('segmented_image', segmented_image)
Example #10
                if len(approx) == 4:
                    # build a filled mask of the candidate quadrilateral
                    # (assumes width/height match the image's row/column order)
                    mask = np.zeros([width, height, 3], dtype="uint8")
                    mask_gray = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
                    cv2.drawContours(mask_gray, [cnt], 0, (255, 255, 255), cv2.FILLED)
                    mask_gray_white = cv2.bitwise_not(mask_gray)
                    masked_img = cv2.bitwise_and(borderWhite, mask_gray)
                    masked_img = cv2.bitwise_or(mask_gray_white, masked_img)
                    x, y, w, h = cv2.boundingRect(cnt)
                    crop_img = masked_img[y:y + h, x:x + w]
                    binary_threshold = arg.integer_value('binary_threshold',
                                                         default_value=defaults.BINARY_THRESHOLD)
                    if arg.boolean_value('verbose'):
                        print('Binarizing with threshold value of ' + str(binary_threshold))
                    inv_binary = cv2.bitwise_not(clean.binarize(crop_img, threshold=binary_threshold))
                    binary = clean.binarize(crop_img, threshold=binary_threshold)

                    segmented_image = seg.segment_image(crop_img)
                    segmented_image = segmented_image[:, :, 2]
                    mySceneImage = np.copy(segmented_image)
                    # OpenCV 3 signature: findContours returns (image, contours, hierarchy)
                    image, contours, hierarchy = cv2.findContours(mySceneImage, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
                    # distinct loop variable so the outer cnt (used below) is not shadowed
                    for inner_cnt in contours:
                        if cv2.contourArea(inner_cnt) > 3000:
                            cv2.drawContours(crop_img, [inner_cnt], 0, (255, 255, 255), cv2.FILLED)
                    # cv2.imshow('image', crop_img)
                    # cv2.waitKey(0)
                    # cv2.imwrite(pathSolved + "/" + str(counter) + ".jpg", crop_img)
                    edges = cv2.Canny(crop_img, 500, 500)
                    edgesContours = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
                    CannyLines.append(len(edgesContours[1]))
                    widths.append(w)
                    heights.append(h)
                    AreaToRectangleRatio.append(cv2.contourArea(cnt) / (w * h))
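
cv2.findContours returns three values in OpenCV 3 but two in OpenCV 4, so the tuple unpacking above breaks on newer OpenCV. A version-agnostic sketch (the helper name is mine, not from the original code):

def find_contours_compat(img, mode, method):
    # OpenCV 3 returns (image, contours, hierarchy); OpenCV 4 returns (contours, hierarchy)
    result = cv2.findContours(img, mode, method)
    return result[-2], result[-1]  # (contours, hierarchy) under either version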
Example #11
# coding: utf-8
from skimage.measure import regionprops
from tensorflow.keras.models import load_model
from skimage.io import imread
from skimage.measure import label
import segmentation as seg
import matplotlib.pyplot as plt

vgg = load_model('vgg16-validated-five-classes.h5')
x = imread('/tmp/medistrava.png')
pred, mask = seg.segment_image(
    x, vgg, blocksize=(224, 224), plot=True,
    prob_threshold=0.66, prob_alpha=0.5, cmap=plt.cm.viridis)
mask = mask.astype(bool)
labeled = label(mask)
rp = regionprops(labeled)

for region in rp:
    minrow, mincol, maxrow, maxcol = region.bbox
    plt.hlines(minrow, mincol, maxcol)
    plt.hlines(maxrow, mincol, maxcol)
    plt.vlines(mincol, minrow, maxrow)
    plt.vlines(maxcol, minrow, maxrow)
plt.show()