def main():
    p = opt.ArgumentParser(description="""
            Computes textural tissue descriptors from an RGB image (of an H&E slide).
            """)
    p.add_argument('img_file', action='store', help='RGB image file of an H&E slide')
    p.add_argument('out_file', action='store', nargs='?', default='descriptors.dat',
                   help='Name of the result file (default: descriptors.dat)')

    # p.add_argument('model_file', action='store', help='Models file')
    p.add_argument('--scale', action='store', type=float, default=1.0,
                   help='Scale of the image at which the descriptors are computed (default: 1.0)')
    p.add_argument('--ngl', type=int, default=16, action='store',
                   help='Number of grey levels in H- and E-images (default: 16)')
    p.add_argument('--wsize', action='store', type=int, default=50,
                   help='Sliding window size (default: 50)')
    p.add_argument('--mask', action='store_true',
                   help='compute a tissue mask and restrict the descriptors to the tissue region')


    args = p.parse_args()
    img_file = args.img_file
    # model_file = args.model_file
    n_grey_levels = args.ngl
    w_size = args.wsize
    scale = args.scale
    out_file = args.out_file

    # strip the extension (if any) from the file name
    base_name = os.path.splitext(os.path.basename(img_file))[0]

    img = skimage.io.imread(img_file)

    # with ModelPersistence(model_file, 'r', format='pickle') as d:
    #    rgb_models = d['models']

    img_h, img_e   = rgb2he(img, normalize=True)
    img_h          = requantize(img_h, nlevels=n_grey_levels, method='linear')
    img_e          = requantize(img_e, nlevels=n_grey_levels, method='linear')

    G = GaborDescriptor()
    if args.mask:
        mask, _ = tissue_region_from_rgb(img, _min_area=150)
        g_h = get_gabor_desc(img_h, G, w_size, scale, mask)
        g_e = get_gabor_desc(img_e, G, w_size, scale, mask)
    else:
        g_h = get_gabor_desc(img_h, G, w_size, scale)
        g_e = get_gabor_desc(img_e, G, w_size, scale)

    with open(out_file, 'w') as f:
        for d in g_h:
            f.write('\t'.join(str(x) for x in d))
            f.write('\n')
        for d in g_e:
            f.write('\t'.join(str(x) for x in d))
            f.write('\n')

    return
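
For reference, a minimal sketch (not part of the original script) of reading the descriptor file back, assuming every row written above has the same length; the H-channel rows come first, followed by the E-channel rows:

import numpy as np

# Load the tab-separated descriptor matrix written by main(); the file name is
# whatever was passed as out_file (default: descriptors.dat).
desc = np.loadtxt('descriptors.dat', delimiter='\t')
print(desc.shape)    # (number of windows over H plus E, descriptor length)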
Example no. 2
def main():
    p = opt.ArgumentParser(
        description="""
            Produces a mask covering the tissue region in the image.
            """
    )
    p.add_argument("img_file", action="store", help="RGB image file")
    p.add_argument(
        "--prefix", action="store", help="optional prefix for the result files: prefix_tissue_mask.pbm", default=None
    )
    p.add_argument("--minarea", action="store", help="object smaller than this will be removed", default=150)
    p.add_argument(
        "--gth", action="store", type=float, help="if provided, indicates the threshold in the green channel", default=None
    )
    p.add_argument("--meta", action="store_true", help="store meta information associated with the results")
    args = p.parse_args()
    # strip the extension (if any) from the file name
    base_name = os.path.splitext(os.path.basename(args.img_file))[0]

    if args.prefix is not None:
        pfx = args.prefix
    else:
        pfx = base_name

    img = skimage.io.imread(args.img_file)
    mask, g_th = tissue_region_from_rgb(img, _min_area=args.minarea, _g_th=args.gth)
    skimage.io.imsave(pfx + "_tissue_mask.pbm", 255 * mask.astype("uint8"))

    if args.meta:
        r = ET.Element("meta", attrib={"processor": "wsi_mask"})
        t = ET.SubElement(r, "file")
        t.text = args.img_file
        t = ET.SubElement(r, "parameters")
        t1 = ET.SubElement(t, "prefix")
        t1.text = args.prefix
        t1 = ET.SubElement(t, "minarea")
        t1.text = str(args.minarea)
        t1 = ET.SubElement(t, "gth")
        t1.text = str(args.gth)
        t = ET.SubElement(r, "outfile")
        t.text = pfx + "_tissue_mask.pbm"
        t = ET.SubElement(r, "gth_res")
        t.text = str(g_th)

        raw_txt = ET.tostring(r, "utf-8")
        reparsed = minidom.parseString(raw_txt)
        pp_txt = reparsed.toprettyxml(indent="  ")
        with open(pfx + "_tissue_mask.meta.xml", "w") as meta_file:
            meta_file.write(pp_txt)

    return
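
A small sketch of consuming the mask produced above; the file name below is hypothetical, following the prefix_tissue_mask.pbm pattern used by the script:

import skimage.io

# Tissue pixels were saved as 255 and background as 0, so a comparison with 0
# restores the boolean mask.
mask = skimage.io.imread('slide_tissue_mask.pbm') > 0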
Example no. 3
def run():
    global img_file, res_prefix, s_factors, n_splits, ovlap
    # n_levels and res_format are also read from module-level configuration

    r = ET.Element('meta')
    t = ET.SubElement(r, 'file')
    t.text = img_file

    t = ET.SubElement(r, 'parameters')
    t1 = ET.SubElement(t, 'prefix')
    t1.text = res_prefix

    t1 = ET.SubElement(t, 'shrink')
    for s in s_factors:
        t2 = ET.SubElement(t1, 'factor')
        t2.text = str(s)

    t1 = ET.SubElement(t, 'split')
    for s in n_splits:
        t2 = ET.SubElement(t1, 'tile')
        t2.text = str(s)

    t1 = ET.SubElement(t, 'overlap')
    t1.text = str(ovlap)

    img = VImage.VImage(img_file)

    t1 = ET.SubElement(r, 'original')
    t2 = ET.SubElement(t1, 'width')
    t2.text = str(img.Xsize())
    t2 = ET.SubElement(t1, 'height')
    t2.text = str(img.Ysize())
    t2 = ET.SubElement(t1, 'channels')
    t2.text = str(img.Bands())
    t2 = ET.SubElement(t1, 'xres')
    t2.text = str(img.Xres())
    t2 = ET.SubElement(t1, 'yres')
    t2.text = str(img.Yres())
    t2 = ET.SubElement(t1, 'scale')
    t2.text = '1.0'
    t2 = ET.SubElement(t1, 'tile')
    t2.text = '(1, 1)'

    path = res_prefix + '/' + os.path.basename(img_file)
    if os.path.exists(path):
        print('Warning: Overwriting old files!')
    else:
        os.mkdir(path)

    print("ROI detection: ")
    # Find the ROI:
    # img_scaled = img.shrink(100, 100)
    os.spawnv(os.P_WAIT, './div100', ['./div100', img_file, path + '/small.ppm'])
    # save downscaled image - not the best way for going to Scikit Image,
    # but for large images we go through disk I/O anyway:
    # print("    -saving small version of the image")
    # img_scaled.write(path+'/small.ppm')

    print("    -read into scikit-learn")
    img_scaled = imread(path + '/small.ppm')

    # compute a minimal area based on the resolution of the image
    # (the image is 100x smaller than the original, so its resolution is scaled accordingly)
    print("    -computing mask")
    xres, yres = img.Xres() / 100, img.Yres() / 100
    min_area = 4 * min(xres, yres)  # ~4 mm**2
    mask, _ = tissue_region_from_rgb(img_scaled, min_area)

    # save the mask:
    print("    -saving mask")
    imsave(path + '/mask_div100.pbm', 255 * mask.astype('uint8'))
    t2 = ET.SubElement(t1, 'mask')
    t2.text = 'mask_div100.pbm'

    # coordinates of the ROI encompassing the objects, at 0.01 of original image
    print("    -detect ROI")
    rmin, cmin, rmax, cmax = bounding_box(mask)
    mask = mask[rmin:rmax + 1, cmin:cmax + 1]

    # get back to original coordinates, with an approximation of 100 pixels...
    rmin *= 100
    cmin *= 100
    rmax = min((rmax + 1) * 100, img.Ysize())
    cmax = min((cmax + 1) * 100, img.Xsize())

    t2 = ET.SubElement(t1, 'roi', {
        'xmin': str(cmin),
        'ymin': str(rmin),
        'xmax': str(cmax),
        'ymax': str(rmax)
    })

    print("...end ROI detection.")

    # Save initial level 0:
    print("Crop ROI and save...")
    img_cropped = img.extract_area(cmin, rmin, cmax - cmin + 1,
                                   rmax - rmin + 1)
    img_cropped.write(path + '/pyramid-level_0.ppm')
    new_width, new_height = img_cropped.Xsize(), img_cropped.Ysize()
    img_cropped = None
    print("...OK")

    mask = None
    img = None  # done with it
    gc.collect()

    # Generate the pyramid: one <level> entry per pyramid level
    t1 = ET.SubElement(r, 'pyramid')
    for lvl in range(n_levels + 1):
        ET.SubElement(t1, 'level', {
            'value': str(lvl),
            'file': 'pyramid-level_' + str(lvl) + '.ppm'
        })

    # call external tool:
    print("Computing pyramid...")
    os.spawnv(os.P_WAIT, './pyr', [
        './pyr', path + '/pyramid-level_0.ppm', path + '/pyramid',
        str(n_levels)
    ])
    print("...done")

    k = 0
    for l in np.arange(n_levels + 1):
        f = s_factors[l]
        pt = path + '/' + str(s_factors[l])
        if not os.path.exists(pt):
            os.mkdir(pt)

        t1 = ET.SubElement(r, 'version')
        t2 = ET.SubElement(t1, 'scale')
        t2.text = str(f)

        n_horiz, n_vert = n_splits[k]
        t2 = ET.SubElement(t1, 'split')
        t2.text = str((n_horiz, n_vert))

        # img_scaled = img_cropped.shrink(f, f)
        # load the corresponding level in the pyramid:
        img_scaled = VImage.VImage(path + '/pyramid-level_' + str(l) + '.ppm')
        width = img_scaled.Xsize()
        height = img_scaled.Ysize()

        w = width / n_horiz
        h = height / n_vert
        ov = ovlap / 100.0 / 2.0  # ovlap is in % and we need half of it
        sv = int(n_vert != 1)
        sh = int(n_horiz != 1)

        print('Processing scale %d and tile %d,%d' % (f, n_horiz, n_vert))

        y0 = 0
        for i in np.arange(n_vert):
            x0 = 0

            if i < n_vert - 1:
                y1 = int(y0 + h * (1.0 + sv * ov)) - 1
            else:
                y1 = height - 1

            for j in np.arange(n_horiz):
                if j < n_horiz - 1:
                    x1 = int(x0 + w * (1.0 + sh * ov)) - 1
                else:
                    x1 = width - 1

                tile_name = 'tile_' + str(i) + '_' + str(j) + '.' + res_format
                res_file = pt + '/' + tile_name
                print('Save to ' + res_file)
                t2 = ET.SubElement(
                    t1, 'tile', {
                        'name': tile_name,
                        'x0': str(x0),
                        'y0': str(y0),
                        'x1': str(x1),
                        'y1': str(y1)
                    })

                #print('x0 = %d, y0 = %d, x1 = %d, y1 = %d, sh = %d, ov = %f; image: %d x %d' % (x0, y0, x1, y1, sh, ov, width, height))

                # do the actual work...
                img_sub = img_scaled.extract_area(x0, y0, x1 - x0 + 1,
                                                  y1 - y0 + 1)
                img_sub.write(res_file)

                x0 = int(x1 + 1 - 2.0 * w * ov)

            y0 = int(y1 + 1 - 2.0 * h * ov)   # vertical step uses the tile height, not the width

        k += 1

    raw_txt = ET.tostring(r, 'utf-8')
    reparsed = minidom.parseString(raw_txt)
    pp_txt = reparsed.toprettyxml(indent='  ')

    with open(path + '/meta.xml', 'w') as meta_file:
        meta_file.write(pp_txt)

    return
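
The tiling arithmetic from the inner loops of run() can be isolated into a short, self-contained sketch; tile_bounds below is our own helper (not in the source) and mirrors the x0/x1 updates for one dimension:

def tile_bounds(size, n, ovlap):
    """Split a 1-D extent of `size` pixels into `n` tiles whose neighbors
    share `ovlap` percent of a tile, mirroring the loop logic in run()."""
    w = size / n
    ov = ovlap / 100.0 / 2.0      # half of the overlap, as in run()
    s = int(n != 1)               # a single tile needs no overlap
    bounds, x0 = [], 0
    for j in range(n):
        x1 = int(x0 + w * (1.0 + s * ov)) - 1 if j < n - 1 else size - 1
        bounds.append((x0, x1))
        x0 = int(x1 + 1 - 2.0 * w * ov)
    return bounds

print(tile_bounds(1000, 4, 10))
# [(0, 261), (237, 498), (474, 735), (711, 999)]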
Example no. 4
def main():
    p = opt.ArgumentParser(description="""
            Constructs a dictionary for image representation based on a set of specified local
            descriptors. The dictionary is built from a set of images given as a list in an
            input file.
            """)
    p.add_argument('config', action='store', help='a configuration file')
    args = p.parse_args()
    cfg_file = args.config
    
    parser = ConfigParser()  # note: SafeConfigParser is a deprecated alias of ConfigParser
    parser.read(cfg_file)
    
    #---------
    # sampler:
    if not parser.has_section('sampler'):
        raise ValueError('"sampler" section is mandatory')
    if not parser.has_option('sampler', 'type'):
        raise ValueError('"sampler.type" is mandatory')
    tmp = parser.get('sampler', 'type').lower()
    if tmp not in ['random', 'sliding']:
        raise ValueError('Unknown sampling type')
    sampler_type = tmp
    if not parser.has_option('sampler', 'window_size'):
        raise ValueError('"sampler.window_size" is mandatory')
    wnd_size = ast.literal_eval(parser.get('sampler', 'window_size'))
    if type(wnd_size) != tuple:
        raise ValueError('"sampler.window_size" specification error')
    it_start = (0, 0)
    it_step = (1, 1)
    if sampler_type == 'sliding':
        if parser.has_option('sampler', 'start'):
            it_start = ast.literal_eval(parser.get('sampler', 'start'))
        if parser.has_option('sampler', 'step'):
            it_step = ast.literal_eval(parser.get('sampler', 'step'))
    nwindows = parser.getint('sampler', 'nwindows')

    local_descriptors = []
    #---------
    # haar:
    if parser.has_section('haar'):
        tmp = True
        if parser.has_option('haar', 'norm'):
            tmp = parser.getboolean('haar', 'norm')
        if len(parser.items('haar')) == 0:
            # empty section, use defaults
            h = HaarLikeDescriptor(HaarLikeDescriptor.haars1())
        else:
            h = HaarLikeDescriptor([ast.literal_eval(v)
                                    for n, v in parser.items('haar')
                                    if n.lower() != 'norm'],
                                   _norm=tmp)
        local_descriptors.append(h)
        
        
    #---------
    # identity:
    if parser.has_section('identity'):
        local_descriptors.append(IdentityDescriptor())
        
    #---------
    # stats:
    if parser.has_section('stats'):
        tmp = []
        if parser.has_option('stats', 'mean') and parser.getboolean('stats', 'mean'):
            tmp.append('mean')
        if parser.has_option('stats', 'std') and parser.getboolean('stats', 'std'):
            tmp.append('std')
        if parser.has_option('stats', 'kurtosis') and parser.getboolean('stats', 'kurtosis'):
            tmp.append('kurtosis')
        if parser.has_option('stats', 'skewness') and parser.getboolean('stats', 'skewness'):
            tmp.append('skewness')
        if len(tmp) == 0:
            tmp = None
        local_descriptors.append(StatsDescriptor(tmp))
    
    #---------
    # hist:
    if parser.has_section('hist'):
        tmp = (0.0, 1.0)
        tmp2 = 10
        if parser.has_option('hist', 'min_max'):
            tmp = ast.literal_eval(parser.get('hist', 'min_max'))
            if type(tmp) != tuple:
                raise ValueError('"hist.min_max" specification error')
        if parser.has_option('hist', 'nbins'):
            tmp2 = parser.getint('hist', 'nbins')
        local_descriptors.append(HistDescriptor(_interval=tmp, _nbins=tmp2))
    
    
    #---------
    # HoG
    if parser.has_section('hog'):
        tmp = 9
        tmp2 = (128, 128)
        tmp3 = (4, 4)
        
        if parser.has_option('hog', 'norient'):
            tmp = parser.getint('hog', 'norient')
        if parser.has_option('hog', 'ppc'):
            tmp2 = ast.literal_eval(parser.get('hog', 'ppc'))
            if type(tmp2) != tuple:
                raise ValueError('"hog.ppc" specification error')
        if parser.has_option('hog', 'cpb'):
            tmp3 = ast.literal_eval(parser.get('hog', 'cpb'))
            if type(tmp3) != tuple:
                raise ValueError('"hog.cpb" specification error')
        local_descriptors.append(HOGDescriptor(_norient=tmp, _ppc=tmp2, _cpb=tmp3))
        
        
    #---------
    # LBP
    if parser.has_section('lbp'):
        tmp = 3
        tmp2 = 8 * tmp
        tmp3 = 'uniform'
        
        if parser.has_option('lbp', 'radius'):
            tmp = parser.getint('lbp', 'radius')
        if parser.has_option('lbp', 'npoints'):
            tmp2 = parser.getint('lbp', 'npoints')
            if tmp2 == 0:
                tmp2 = 8 * tmp
        if parser.has_option('lbp', 'method'):
            tmp3 = parser.get('lbp', 'method')
        local_descriptors.append(LBPDescriptor(radius=tmp, npoints=tmp2, method=tmp3))

    #---------
    # Gabor
    if parser.has_section('gabor'):
        tmp  = np.array([0.0, np.pi / 4.0, np.pi / 2.0, 3.0 * np.pi / 4.0], dtype=np.double)
        tmp2 = np.array([3.0 / 4.0, 3.0 / 8.0, 3.0 / 16.0], dtype=np.double)
        tmp3 = np.array([1.0, 2 * np.sqrt(2.0)], dtype=np.double)

        if parser.has_option('gabor', 'theta'):
            tmp = ast.literal_eval(parser.get('gabor', 'theta'))
        if parser.has_option('gabor', 'freq'):
            tmp2 = ast.literal_eval(parser.get('gabor', 'freq'))
        if parser.has_option('gabor', 'sigma'):
            tmp3 = ast.literal_eval(parser.get('gabor', 'sigma'))
        local_descriptors.append(GaborDescriptor(theta=tmp, freq=tmp2, sigma=tmp3))
            
    print('No. of descriptors: ', len(local_descriptors))
    
    #---------
    # data
    if not parser.has_section('data'):
        raise ValueError('Section "data" is mandatory.')
    data_path = parser.get('data', 'input_path')
    img_ext = parser.get('data', 'image_type')
    res_path = parser.get('data', 'output_path')
    
    img_files = glob.glob(data_path + '/*.' + img_ext)
    if len(img_files) == 0:
        return
    
    ## Process:

    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', buffering=1)    # line-buffered output (text mode cannot be unbuffered in Python 3)
    for img_name in img_files:
        print("Image: ", img_name, " ...reading... ", end='')
        im = imread(img_name)
        print("preprocessing... ", end='')
        # -preprocessing
        if im.ndim == 3:
            im_h, _, _ = rgb2he2(im)
        else:
            raise ValueError('Input image must be RGB.')
        
        # detect object region:
        # -try to load a precomputed mask:
        mask_file_name = data_path+'/mask/'+ \
            os.path.splitext(os.path.split(img_name)[1])[0]+ \
            '_tissue_mask.pbm'
        if os.path.exists(mask_file_name):
            print('(loading mask)...', end='')
            mask = imread(mask_file_name)
            mask = img_as_bool(mask)
            mask = remove_small_objects(mask, min_size=500, connectivity=1)
        else:
            print('(computing mask)...', end='')
            mask, _ = tissue_region_from_rgb(im, _min_area=500)
        
        row_min, col_min, row_max, col_max = bounding_box(mask)
        im_h[np.logical_not(mask)] = 0                       # make sure background is 0
        mask = None
        im = None
        im_h = im_h[row_min:row_max+1, col_min:col_max+1]

        print("growing the bag...", end='')
        # -image bag growing
        bag = None                               # bag for current image
        for d in local_descriptors:
            if bag is None:
                bag = grow_bag_from_new_image(im_h, d, wnd_size, nwindows, discard_empty=True)
            else:
                bag[d.name] = grow_bag_with_new_features(im_h, bag['regs'], d)[d.name]

        # save the results for each image, one file per descriptor
        desc_names = list(bag.keys())      # list() so that remove() works in Python 3
        desc_names.remove('regs')          # keep all keys but the regions
        # -save the ROI from the original image:
        res_file = res_path + '/' + 'roi-' + \
                   os.path.splitext(os.path.split(img_name)[1])[0] + '.dat'
        with open(res_file, 'w') as f:
            f.write('\t'.join([str(x_) for x_ in [row_min, row_max, col_min, col_max]]))
                    
        for dn in desc_names:
            res_file = res_path + '/' + dn + '_bag-' + \
                       os.path.splitext(os.path.split(img_name)[1])[0] + '.dat'
            with open(res_file, 'w') as f:
                n = len(bag[dn])                       # total number of descriptors of this type
                for i in range(n):
                    s = '\t'.join([str(x_) for x_ in bag['regs'][i]]) + '\t' + \
                        '\t'.join([str(x_) for x_ in bag[dn][i]]) + '\n'
                    f.write(s)
            
        print('OK')
        
        bag = None
        gc.collect()
        gc.collect()
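
Finally, a minimal sketch of reading one of the per-descriptor bag files back; the file name is hypothetical, and the split assumes each window in bag['regs'] was written as four coordinates preceding the feature values:

import numpy as np

# Each row: window coordinates followed by the feature vector, tab-separated.
rows = np.loadtxt('hist_bag-some_image.dat', delimiter='\t')
regs, feats = rows[:, :4], rows[:, 4:]    # assumes 4 coordinates per window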