Example #1
def main():
    p = opt.ArgumentParser(description="""
            Segments a number of rectangular contexts from an H&E slide. The contexts are clusters
            of similar regions of the image. The similarity is based on various textural
            descriptors.
            """)
    p.add_argument('img_file', action='store', help='RGB image file')
    p.add_argument('ctxt',
                   action='store',
                   help='Number of contexts to extract',
                   type=int)
    p.add_argument('wsize',
                   action='store',
                   help='Size of the (square) regions',
                   type=int)
    p.add_argument(
        'roi',
        action='store',
        help='a file with ROI coordinates (and context descriptors)')
    p.add_argument('label',
                   action='store',
                   type=int,
                   help='the cluster label of interest')

    p.add_argument('--prefix',
                   action='store',
                   help='optional prefix for the resulting files',
                   default=None)
    p.add_argument(
        '--gabor',
        action='store_true',
        help='compute Gabor descriptors and generate the corresponding contexts'
    )
    p.add_argument(
        '--lbp',
        action='store_true',
        help='compute LBP (local binary patterns) descriptors and generate the corresponding contexts')
    p.add_argument(
        '--mfs',
        action='store_true',
        help='compute fractal descriptors and generate the corresponding contexts')
    p.add_argument(
        '--haralick',
        action='store_true',
        help='compute Haralick (GLCM) descriptors and generate the corresponding contexts')
    p.add_argument('--eosine',
                   action='store_true',
                   help='process the Eosin component as well')

    p.add_argument('--scale',
                   action='store',
                   type=float,
                   default=1.0,
                   help='scaling factor for ROI coordinates')

    args = p.parse_args()

    base_name = os.path.basename(args.img_file).split('.')
    if len(base_name) > 1:  # at least 1 suffix .ext
        base_name.pop()  # drop the extension
        base_name = '.'.join(
            base_name)  # reassemble the rest of the list into file name

    if args.prefix is not None:
        pfx = args.prefix
    else:
        pfx = base_name

    ROIs = []
    for l in open(args.roi).readlines():
        # extract the label and the coordinates from each ROI
        # (one per row); column 0 of the file is not used:
        lb, row_min, row_max, col_min, col_max = map(lambda _x: int(float(_x)),
                                                     l.split('\t')[1:6])
        row_min = int(mh.floor(row_min * args.scale))
        row_max = int(mh.floor(row_max * args.scale))
        col_min = int(mh.floor(col_min * args.scale))
        col_max = int(mh.floor(col_max * args.scale))
        if lb == args.label:
            ROIs.append([row_min, row_max, col_min, col_max])

    im = imread(args.img_file)
    print("Original image size:", im.shape)

    # get the H and E planes:
    h, e, _ = rgb2he2(im)

    # shift of the crop w.r.t. the original slide (used below when mapping
    # window coordinates back); assumed zero here - adjust if the image was
    # cropped from a larger slide:
    dh, dw = 0, 0

    if args.gabor:
        print("---------> Gabor descriptors:")
        g = GaborDescriptor()
        desc_label = 'gabor'

        print("------------> H plane")
        # on H-plane:
        img_iterator = sliding_window_on_regions(h.shape,
                                                 ROIs,
                                                 (args.wsize, args.wsize),
                                                 step=(args.wsize, args.wsize))
        dsc = get_local_desc(h, g, img_iterator, desc_label)

        dst = pdist_gabor(dsc)

        cl = average(dst)
        id = fcluster(cl, t=args.ctxt,
                      criterion='maxclust')  # get the various contexts

        # save clustering/contexts - remember, the coordinates are in the
        # current image system which might have been cropped from the original ->
        # should add back the shift
        z1 = desc_to_matrix(dsc, desc_label)  # col 0: row_min, col 2: col_min
        z1[:, 0] += row_min + dh
        z1[:, 2] += col_min + dw
        z2 = np.matrix(id).transpose()
        z2 = np.hstack((z2, z1))
        np.savetxt(pfx + '_' + desc_label + '_h.dat', z2, delimiter="\t")

        # save visualizations
        for k in range(1, 1 + args.ctxt):
            i = np.where(id == k)[0]
            p = [dsc[j]['roi'] for j in i]
            im2 = enhance_patches(im, p)
            imsave(pfx + '_' + desc_label + '_h_' + str(k) + '.ppm', im2)

        if args.eosine:
            # repeat on E plane:
            print("------------> E plane")
            img_iterator = sliding_window_on_regions(h.shape,
                                                     ROIs,
                                                     (args.wsize, args.wsize),
                                                     step=(args.wsize,
                                                           args.wsize))
            dsc = get_local_desc(e, g, img_iterator, desc_label)

            dst = pdist_gabor(dsc)

            cl = average(dst)
            id = fcluster(cl, t=args.ctxt,
                          criterion='maxclust')  # get the various contexts

            # save clustering/contexts - remember, the coordinates are in the
            # current image system which might have been cropped from the original ->
            # should add back the shift
            z1 = desc_to_matrix(dsc,
                                desc_label)  # col 0: row_min, col 2: col_min
            z1[:, 0] += row_min + dh
            z1[:, 2] += col_min + dw
            z2 = np.matrix(id).transpose()
            z2 = np.hstack((z2, z1))
            np.savetxt(pfx + '_' + desc_label + '_e.dat', z2, delimiter="\t")

            # save visualizations
            for k in range(1, 1 + args.ctxt):
                i = np.where(id == k)[0]
                p = [dsc[j]['roi'] for j in i]
                im2 = enhance_patches(im, p)
                imsave(pfx + '_' + desc_label + '_e_' + str(k) + '.ppm', im2)

        print("OK")

    if args.haralick:
        print("---------> Haralick descriptors:")
        g = GLCMDescriptor()
        desc_label = 'haralick'

        print("------------> H plane")
        # on H-plane:
        img_iterator = sliding_window_on_regions(h.shape,
                                                 ROIs,
                                                 (args.wsize, args.wsize),
                                                 step=(args.wsize, args.wsize))
        dsc = get_local_desc(h, g, img_iterator, desc_label)

        dst = pdist_gabor(dsc)

        cl = average(dst)
        id = fcluster(cl, t=args.ctxt,
                      criterion='maxclust')  # get the various contexts

        # save clustering/contexts - remember, the coordinates are in the
        # current image system which might have been cropped from the original ->
        # should add back the shift
        z1 = desc_to_matrix(dsc, desc_label)  # col 0: row_min, col 2: col_min
        z1[:, 0] += row_min + dh
        z1[:, 2] += col_min + dw
        z2 = np.matrix(id).transpose()
        z2 = np.hstack((z2, z1))
        np.savetxt(pfx + '_' + desc_label + '_h.dat', z2, delimiter="\t")

        # save visualizations
        for k in range(1, 1 + args.ctxt):
            i = np.where(id == k)[0]
            p = [dsc[j]['roi'] for j in i]
            im2 = enhance_patches(im, p)
            imsave(pfx + '_' + desc_label + '_h_' + str(k) + '.ppm', im2)

        if args.eosine:
            # repeat on E plane:
            print("------------> E plane")
            img_iterator = sliding_window_on_regions(h.shape,
                                                     ROIs,
                                                     (args.wsize, args.wsize),
                                                     step=(args.wsize,
                                                           args.wsize))
            dsc = get_local_desc(e, g, img_iterator, desc_label)

            dst = pdist_gabor(dsc)

            cl = average(dst)
            id = fcluster(cl, t=args.ctxt,
                          criterion='maxclust')  # get the various contexts

            # save clustering/contexts - remember, the coordinates are in the
            # current image system which might have been cropped from the original ->
            # should add back the shift
            z1 = desc_to_matrix(dsc,
                                desc_label)  # col 0: row_min, col 2: col_min
            z1[:, 0] += row_min + dh
            z1[:, 2] += col_min + dw
            z2 = np.matrix(id).transpose()
            z2 = np.hstack((z2, z1))
            np.savetxt(pfx + '_' + desc_label + '_e.dat', z2, delimiter="\t")

            # save visualizations
            for k in range(1, 1 + args.ctxt):
                i = np.where(id == k)[0]
                p = [dsc[j]['roi'] for j in i]
                im2 = enhance_patches(im, p)
                imsave(pfx + '_' + desc_label + '_e_' + str(k) + '.ppm', im2)

        print("OK")

    if args.lbp:
        print("---------> LBP descriptors:")
        g = LBPDescriptor()
        desc_label = 'lbp'

        # on H-plane:
        print("------------> H plane")
        img_iterator = sliding_window_on_regions(h.shape,
                                                 ROIs,
                                                 (args.wsize, args.wsize),
                                                 step=(args.wsize, args.wsize))
        dsc = get_local_desc(h, g, img_iterator, desc_label)

        dst = pdist_lbp(dsc)

        cl = average(dst)
        id = fcluster(cl, t=args.ctxt,
                      criterion='maxclust')  # get the various contexts

        # save clustering/contexts - remember, the coordinates are in the
        # current image system which might have been cropped from the original ->
        # should add back the shift
        z1 = desc_to_matrix(dsc, desc_label)  # col 0: row_min, col 2: col_min
        z1[:, 0] += row_min + dh
        z1[:, 2] += col_min + dw
        z2 = np.matrix(id).transpose()
        z2 = np.hstack((z2, z1))
        np.savetxt(pfx + '_' + desc_label + '_h.dat', z2, delimiter="\t")

        # save visualizations
        for k in range(1, 1 + args.ctxt):
            i = np.where(id == k)[0]
            p = [dsc[j]['roi'] for j in i]
            im2 = enhance_patches(im, p)
            imsave(pfx + '_' + desc_label + '_h_' + str(k) + '.ppm', im2)

        if args.eosine:
            # repeat on E plane:
            print("------------> E plane")
            img_iterator = sliding_window_on_regions(h.shape,
                                                     ROIs,
                                                     (args.wsize, args.wsize),
                                                     step=(args.wsize,
                                                           args.wsize))
            dsc = get_local_desc(e, g, img_iterator, desc_label)

            dst = pdist_lbp(dsc)

            cl = average(dst)
            id = fcluster(cl, t=args.ctxt,
                          criterion='maxclust')  # get the various contexts

            # save clustering/contexts - remember, the coordinates are in the
            # current image system which might have been cropped from the original ->
            # should add back the shift
            z1 = desc_to_matrix(dsc,
                                desc_label)  # col 0: row_min, col 2: col_min
            z1[:, 0] += row_min + dh
            z1[:, 2] += col_min + dw
            z2 = np.matrix(id).transpose()
            z2 = np.hstack((z2, z1))
            np.savetxt(pfx + '_' + desc_label + '_e.dat', z2, delimiter="\t")

            # save visualizations
            for k in range(1, 1 + args.ctxt):
                i = np.where(id == k)[0]
                p = [dsc[j]['roi'] for j in i]
                im2 = enhance_patches(im, p)
                imsave(pfx + '_' + desc_label + '_e_' + str(k) + '.ppm', im2)

        print("OK")

    if args.mfs:
        print("---------> MFS descriptors:")
        g = MFSDescriptor()
        desc_label = 'mfs'

        # on H-plane:
        print("------------> H plane")
        img_iterator = sliding_window_on_regions(h.shape,
                                                 ROIs,
                                                 (args.wsize, args.wsize),
                                                 step=(args.wsize, args.wsize))
        dsc = get_local_desc(h, g, img_iterator, desc_label)

        dst = pdist_mfs(dsc)

        cl = average(dst)
        id = fcluster(cl, t=args.ctxt,
                      criterion='maxclust')  # get the various contexts

        # save clustering/contexts
        # save clustering/contexts - remember, the coordinates are in the
        # current image system which might have been cropped from the original ->
        # should add back the shift
        z1 = desc_to_matrix(dsc, desc_label)  # col 0: row_min, col 2: col_min
        z1[:, 0] += row_min + dh
        z1[:, 2] += col_min + dw
        z2 = np.matrix(id).transpose()
        z2 = np.hstack((z2, z1))
        np.savetxt(pfx + '_' + desc_label + '_h.dat', z2, delimiter="\t")

        # save visualizations
        for k in range(1, 1 + args.ctxt):
            i = np.where(id == k)[0]
            p = [dsc[j]['roi'] for j in i]
            im2 = enhance_patches(im, p)
            imsave(pfx + '_' + desc_label + '_h_' + str(k) + '.ppm', im2)

        if args.eosine:
            # repeat on E plane:
            print("------------> E plane")
            img_iterator = sliding_window_on_regions(h.shape,
                                                     ROIs,
                                                     (args.wsize, args.wsize),
                                                     step=(args.wsize,
                                                           args.wsize))
            dsc = get_local_desc(e, g, img_iterator, desc_label)

            dst = pdist_mfs(dsc)

            cl = average(dst)
            id = fcluster(cl, t=args.ctxt,
                          criterion='maxclust')  # get the various contexts

            # save clustering/contexts - remember, the coordinates are in the
            # current image system which might have been cropped from the original ->
            # should add back the shift
            z1 = desc_to_matrix(dsc,
                                desc_label)  # col 0: row_min, col 2: col_min
            z1[:, 0] += row_min + dh
            z1[:, 2] += col_min + dw
            z2 = np.matrix(id).transpose()
            z2 = np.hstack((z2, z1))
            np.savetxt(pfx + '_' + desc_label + '_e.dat', z2, delimiter="\t")

            # save visualizations
            for k in range(1, 1 + args.ctxt):
                i = np.where(id == k)[0]
                p = [dsc[j]['roi'] for j in i]
                im2 = enhance_patches(im, p)
                imsave(pfx + '_' + desc_label + '_e_' + str(k) + '.ppm', im2)

        print("OK")

    return
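A plausible invocation of the script above (the script file name is hypothetical; the positional arguments and flags follow the parser defined in main()):

# Extract 4 Gabor-based contexts from 64x64 windows inside the label-2
# ROIs listed in rois.tsv, processing the Eosin plane as well:
#
#   python segment_contexts.py slide.ppm 4 64 rois.tsv 2 --gabor --eosine --prefix slide1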
Example #2
def grow_bag_from_new_image(image, desc, w_size, n_obj, **kwargs):
    """
    Extracts local descriptors from a new image.

    :param image: numpy.array
        Image data (single channel).
    :param desc: LocalDescriptor
        Local descriptor for feature extraction.
    :param w_size: tuple
        (width, height) of the sub-windows from the image.
    :param n_obj: int
        Maximum number of objects to be added to the bag.
    :param kwargs: dict
        Other parameters:
        'roi': region of interest (default: None)
        'sampling_strategy': how the image should be sampled:
            'random' for random sampling
            'sliding' for systematic, sliding window scanning
             of the image
        'it_start': where the scanning of the image starts (for
            sliding window sampling strategy) (default (0,0))
        'it_step': step from one window to the next (for
            sliding window sampling strategy) (default (1,1))
        'discard_empty': (boolean) whether an empty patch should still
            be processed or simply discarded. Default: False
    :return: dict
        A dictionary with two elements:
            <name of the descriptor>: list
            'regs': list
        The first list contains the feature descriptors.
        The second list contains the corresponding window positions.

    See also: grow_bag_with_new_features
    """

    if 'roi' not in kwargs:
        roi = None
    else:
        roi = kwargs['roi']

    if 'it_start' not in kwargs:
        it_start = (0,0)
    else:
        it_start = kwargs['it_start']

    if 'it_step' not in kwargs:
        it_step = (1,1)
    else:
        it_step = kwargs['it_step']

    if 'sampling_strategy' not in kwargs:
        sampling_strategy = 'random'
    else:
        sampling_strategy = kwargs['sampling_strategy']
        
    if 'discard_empty' in kwargs:
        discard_empty = kwargs['discard_empty']
    else:
        discard_empty = False

    w_offset = (0, 0)
    if isinstance(desc, HaarLikeDescriptor):
        # this one works on integral images
        image = intg_image(image)
        # the sliding window should also be increased by 1:
        w_offset = (1, 1)
        w_size = (w_size[0] + w_offset[0], w_size[1] + w_offset[1])

    # create iterator:
    sampling_strategy = sampling_strategy.lower()
    if sampling_strategy == 'random':
        if roi is None:
            itw = random_window(image.shape, w_size, n_obj)
        else:
            itw = random_window_on_regions(image.shape, roi, w_size, n_obj)
    elif sampling_strategy == 'sliding':
        if roi is None:
            itw = sliding_window(image.shape, w_size, start=it_start, step=it_step)
        else:
            itw = sliding_window_on_regions(image.shape, roi, w_size, step=it_step)
    else:
        raise ValueError('Unknown strategy.')

    bag = []
    wnd = []
    n = 0

    for r in itw:
        if discard_empty and image[r[0]:r[1], r[2]:r[3]].sum() < 1e-16:
            continue

        # adjust if needed:
        r2 = (r[0], r[1] - w_offset[1], r[2], r[3] - w_offset[0])
        wnd.append(r2)
        bag.append(desc.compute(image[r[0]:r[1], r[2]:r[3]]))

        n += 1
        if n >= n_obj:
            break

    return {desc.name: bag, 'regs': wnd}
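A minimal usage sketch for grow_bag_from_new_image(). GaborDescriptor and the single-channel image img are assumptions here; any LocalDescriptor with a .name attribute and a .compute() method, as used elsewhere on this page, should work:

desc = GaborDescriptor()
bag = grow_bag_from_new_image(img, desc, (32, 32), 500,
                              sampling_strategy='random',
                              discard_empty=True)
features = bag[desc.name]  # list of local feature descriptors
windows = bag['regs']      # matching (row_min, row_max, col_min, col_max) windows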
Example #3
def main():
    p = opt.ArgumentParser(description="""
    Assigns the regions of an image to the clusters of a codebook.
    """)
    p.add_argument('image', action='store', help='image file name')
    p.add_argument('model', action='store', help='model file name')
    p.add_argument('out_file', action='store', help='results file name')
    p.add_argument('-r', '--roi', action='store', nargs=4, type=int,
                   help='region of interest from the image as: row_min row_max col_min col_max',
                   default=None)
    args = p.parse_args()

    wsize = 32
    tmp  = np.array([0.0, np.pi / 4.0, np.pi / 2.0, 3.0 * np.pi / 4.0],
        dtype=np.double)
    tmp2 = np.array([3.0 / 4.0, 3.0 / 8.0, 3.0 / 16.0], dtype=np.double)
    tmp3 = np.array([1.0, 2 * np.sqrt(2.0)], dtype=np.double)

    desc = GaborDescriptor(theta=tmp, freq=tmp2, sigma=tmp3)

    image = skimage.io.imread(args.image)
    if image.ndim == 3:
        im_h, _ = rgb2he(image, normalize=True)
        im_h = equalize_adapthist(im_h)
        im_h = rescale_intensity(im_h, out_range=(0,255))
        im_h = im_h.astype(np.uint8)
        image = im_h
        im_h = None

    if args.roi is None:
        roi = (0, image.shape[0]-1, 0, image.shape[1]-1)
    else:
        roi = args.roi

    with ModelPersistence(args.model, 'r', format='pickle') as mp:
        codebook = mp['codebook']
        avg_dist = None
        sd_dist = None
        if 'avg_dist_to_centroid' in mp:
            avg_dist = mp['avg_dist_to_centroid']
        if 'stddev_dist_to_centroid' in mp:
            sd_dist = mp['stddev_dist_to_centroid']


    itw = sliding_window_on_regions(image.shape, [tuple(roi)], (wsize,wsize), step=(wsize,wsize))
    wnd = []
    labels = []
    dists = []
    buff_size = 100                  # every <buff_size> patches we do a classification
    X = np.zeros((buff_size, codebook.cluster_centers_[0].shape[0]))

    k = 0
    for r in itw:
        # adjust if needed:
        r2 = (r[0], r[1], r[2], r[3])
        wnd.append(r2)
        X[k,:] = desc.compute(image[r[0]:r[1], r[2]:r[3]])
        k += 1
        if k == buff_size:
            y = codebook.predict(X)
            Z = codebook.transform(X)
            labels.extend(y.tolist())
            dists.extend(Z[np.arange(buff_size), y].tolist())  # get the distances to the centroids of the assigned clusters
            k = 0                      # reset the block

    if k != 0:
        # it means some data is accumulated in X but not yet classified
        y = codebook.predict(X[0:k,])
        Z = codebook.transform(X[0:k,])
        labels.extend(y.tolist())
        dists.extend(Z[np.arange(k), y].tolist())  # get the distances to the centroids of the assigned clusters

    # save data
    with open(args.out_file, 'w') as f:
        n = len(wnd)                       # total number of descriptors of this type
        for k in range(n):
            s = '\t'.join([str(x_) for x_ in wnd[k]]) + '\t' + str(labels[k]) + \
                '\t' + str(dists[k]) + '\n'
            f.write(s)
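Each output row is tab-separated: the four window coordinates, the assigned cluster label, and the distance to that cluster's centroid. A sketch for loading the results back (the file name is a placeholder for args.out_file):

import numpy as np

res = np.loadtxt('results.dat', delimiter='\t')
coords = res[:, 0:4].astype(int)  # row_min, row_max, col_min, col_max
labels = res[:, 4].astype(int)    # assigned cluster per window
dists = res[:, 5]                 # distance to the assigned centroid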
Example #4
def main():
    p = opt.ArgumentParser(description="""
    Assigns the regions of an image to the clusters of a codebook.
    """)
    p.add_argument('image', action='store', help='image file name')
    p.add_argument('config', action='store', help='a configuration file')
    p.add_argument('-r', '--roi', action='store', nargs=4, type=int,
                   help='region of interest from the image as: row_min row_max col_min col_max',
                   default=None)
    args = p.parse_args()
    img_file = args.image
    cfg_file = args.config

    image_orig = skimage.io.imread(img_file)
    if image_orig.ndim == 3:
        im_h, _, _ = rgb2he2(image_orig)
    else:
        im_h = image_orig  # already single channel; use as-is

    if args.roi is None:
        roi = (0, im_h.shape[0] - 1, 0, im_h.shape[1] - 1)
    else:
        roi = args.roi

    # Process configuration file:
    parser = SafeConfigParser()
    parser.read(cfg_file)

    if not parser.has_section('data'):
        raise RuntimeError('Section [data] is mandatory')
    wsize = (32, 32)
    if parser.has_option('data', 'window_size'):
        wsize = ast.literal_eval(parser.get('data', 'window_size'))

    if not parser.has_option('data', 'model'):
        raise RuntimeError('model file name is missing in [data] section')
    model_file = parser.get('data', 'model')
    with ModelPersistence(model_file, 'r', format='pickle') as mp:
        codebook = mp['codebook']
        Xm = mp['shift']
        Xs = mp['scale']
        standardize = mp['standardize']

    if parser.has_option('data', 'output'):
        out_file = parser.get('data', 'output')
    else:
        out_file = 'output.dat'

    descriptors = read_local_descriptors_cfg(parser)

    # For the moment, it is assumed that only one type of local descriptor is
    # used - no composite feature vectors. This will change in the future but,
    # for now, only the first descriptor in the "descriptors" list is used,
    # and the codebook is assumed to have been constructed with the same one.

    desc = descriptors[0]

    print(img_file)
    print(wsize)
    print(roi[0], roi[1], roi[2], roi[3])

    w_offset = (0, 0)
    if isinstance(desc, HaarLikeDescriptor):
        # this one works on integral images
        image = intg_image(im_h)
        # the sliding window should also be increased by 1:
        w_offset = (1, 1)
        wsize = (wsize[0] + w_offset[0], wsize[1] + w_offset[1])
    else:
        image = im_h

    itw = sliding_window_on_regions(image.shape, [tuple(roi)],
                                    wsize,
                                    step=wsize)
    wnd = []
    labels = []
    buff_size = 10000  # every <buff_size> patches we do a classification
    X = np.zeros((buff_size, codebook.cluster_centers_[0].shape[0]))
    k = 0
    if standardize:  # placed here, to avoid testing inside the loop
        for r in itw:
            # adjust if needed:
            r2 = (r[0], r[1] - w_offset[1], r[2], r[3] - w_offset[0])
            wnd.append(r2)
            X[k, :] = desc.compute(image[r[0]:r[1], r[2]:r[3]])
            k += 1
            if k == buff_size:
                X = (X - Xm) / Xs
                labels.extend(codebook.predict(X).tolist())
                k = 0  # reset the block
    else:
        for r in itw:
            # adjust if needed:
            r2 = (r[0], r[1] - w_offset[1], r[2], r[3] - w_offset[0])
            wnd.append(r2)
            X[k, :] = desc.compute(image[r[0]:r[1], r[2]:r[3]])
            k += 1
            if k == buff_size:
                labels.extend(codebook.predict(X).tolist())
                k = 0  # reset the block

    if k != 0:
        # some data is accumulated in X but not yet classified
        if standardize:
            X[0:k, :] = (X[0:k, :] - Xm) / Xs
        labels.extend(codebook.predict(X[0:k, :]).tolist())

    with open(out_file, 'w') as f:
        n = len(wnd)  # total number of descriptors of this type
        for k in range(n):
            s = '\t'.join([str(x_)
                           for x_ in wnd[k]]) + '\t' + str(labels[k]) + '\n'
            f.write(s)
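A configuration file compatible with the parsing above might look like the sketch below (file names are placeholders; window_size is read with ast.literal_eval, so it must be a valid Python tuple). The sections describing the local descriptors themselves are consumed by read_local_descriptors_cfg(), whose exact format is not shown on this page:

[data]
window_size = (32, 32)
model = codebook.pkl
output = labels.dat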
Example #5
def main():
    p = opt.ArgumentParser(description="""
            Segments a number of rectangular contexts from an H&E slide. The contexts are clusters
            of similar regions of the image. The similarity is based on various textural
            descriptors.
            """)
    p.add_argument('img_file', action='store', help='RGB image file')
    p.add_argument('ctxt', action='store', help='Number of contexts to extract', type=int)
    p.add_argument('wsize', action='store', help='Size of the (square) regions', type=int)
    p.add_argument('roi', action='store', help='a file with ROI coordinates (and context descriptors)')
    p.add_argument('label', action='store', type=int, help='the cluster label of interest')

    p.add_argument('--prefix', action='store',
                   help='optional prefix for the resulting files',
                   default=None)
    p.add_argument('--gabor', action='store_true',
                   help='compute Gabor descriptors and generate the corresponding contexts')
    p.add_argument('--lbp', action='store_true',
                   help='compute LBP (local binary patterns) descriptors and generate the corresponding contexts')
    p.add_argument('--mfs', action='store_true',
                   help='compute fractal descriptors and generate the corresponding contexts')
    p.add_argument('--haralick', action='store_true',
                   help='compute Haralick (GLCM) descriptors and generate the corresponding contexts')
    p.add_argument('--eosine', action='store_true',
                   help='process the Eosin component as well')

    p.add_argument('--scale', action='store', type=float, default=1.0,
                   help='scaling factor for ROI coordinates')


    args = p.parse_args()

    base_name = os.path.basename(args.img_file).split('.')
    if len(base_name) > 1:             # at least 1 suffix .ext
        base_name.pop()                # drop the extension
        base_name = '.'.join(base_name)  # reassemble the rest of the list into file name

    if args.prefix is not None:
        pfx = args.prefix
    else:
        pfx = base_name


    ROIs = []
    for l in open(args.roi).readlines():
        # extract the label and the coordinates from each ROI
        # (one per row); column 0 of the file is not used:
        lb, row_min, row_max, col_min, col_max = map(lambda _x: int(float(_x)), l.split('\t')[1:6])
        row_min = int(mh.floor(row_min * args.scale))
        row_max = int(mh.floor(row_max * args.scale))
        col_min = int(mh.floor(col_min * args.scale))
        col_max = int(mh.floor(col_max * args.scale))
        if lb == args.label:
            ROIs.append([row_min, row_max, col_min, col_max])

    im = imread(args.img_file)
    print("Original image size:", im.shape)

    # get the H and E planes:
    h, e, _ = rgb2he2(im)

    # shift of the crop w.r.t. the original slide (used below when mapping
    # window coordinates back); assumed zero here - adjust if the image was
    # cropped from a larger slide:
    dh, dw = 0, 0

    if args.gabor:
        print("---------> Gabor descriptors:")
        g = GaborDescriptor()
        desc_label = 'gabor'

        print("------------> H plane")
        # on H-plane:
        img_iterator = sliding_window_on_regions(h.shape, ROIs, (args.wsize,args.wsize),
                                                 step=(args.wsize,args.wsize))
        dsc = get_local_desc(h, g, img_iterator, desc_label)

        dst = pdist_gabor(dsc)

        cl = average(dst)
        id = fcluster(cl, t=args.ctxt, criterion='maxclust')  # get the various contexts

        # save clustering/contexts - remember, the coordinates are in the
        # current image system which might have been cropped from the original ->
        # should add back the shift
        z1 = desc_to_matrix(dsc, desc_label)  # col 0: row_min, col 2: col_min
        z1[:, 0] += row_min + dh
        z1[:, 2] += col_min + dw
        z2 = np.matrix(id).transpose()
        z2 = np.hstack( (z2, z1) )
        np.savetxt(pfx+'_'+desc_label+'_h.dat', z2, delimiter="\t")

        # save visualizations
        for k in range(1,1+args.ctxt):
            i = np.where(id == k)[0]
            p = [dsc[j]['roi'] for j in i]
            im2 = enhance_patches(im, p)
            imsave(pfx+'_'+desc_label+'_h_'+str(k)+'.ppm', im2)

        if args.eosine:
            # repeat on E plane:
            print("------------> E plane")
            img_iterator = sliding_window_on_regions(h.shape, ROIs, (args.wsize,args.wsize),
                                                     step=(args.wsize,args.wsize))
            dsc = get_local_desc(e, g, img_iterator, desc_label)

            dst = pdist_gabor(dsc)

            cl = average(dst)
            id = fcluster(cl, t=args.ctxt, criterion='maxclust')  # get the various contexts

            # save clustering/contexts - remember, the coordinates are in the
            # current image system which might have been cropped from the original ->
            # should add back the shift
            z1 = desc_to_matrix(dsc, desc_label)  # col 0: row_min, col 2: col_min
            z1[:, 0] += row_min + dh
            z1[:, 2] += col_min + dw
            z2 = np.matrix(id).transpose()
            z2 = np.hstack( (z2, z1) )
            np.savetxt(pfx+'_'+desc_label+'_e.dat', z2, delimiter="\t")

            # save visualizations
            for k in range(1,1+args.ctxt):
                i = np.where(id == k)[0]
                p = [dsc[j]['roi'] for j in i]
                im2 = enhance_patches(im, p)
                imsave(pfx+'_'+desc_label+'_e_'+str(k)+'.ppm', im2)

        print("OK")

    if args.haralick:
        print("---------> Haralick descriptors:")
        g = GLCMDescriptor()
        desc_label = 'haralick'

        print("------------> H plane")
        # on H-plane:
        img_iterator = sliding_window_on_regions(h.shape, ROIs, (args.wsize,args.wsize),
                                                 step=(args.wsize,args.wsize))
        dsc = get_local_desc(h, g, img_iterator, desc_label)

        dst = pdist_gabor(dsc)

        cl = average(dst)
        id = fcluster(cl, t=args.ctxt, criterion='maxclust')  # get the various contexts

        # save clustering/contexts - remember, the coordinates are in the
        # current image system which might have been cropped from the original ->
        # should add back the shift
        z1 = desc_to_matrix(dsc, desc_label)  # col 0: row_min, col 2: col_min
        z1[:, 0] += row_min + dh
        z1[:, 2] += col_min + dw
        z2 = np.matrix(id).transpose()
        z2 = np.hstack( (z2, z1) )
        np.savetxt(pfx+'_'+desc_label+'_h.dat', z2, delimiter="\t")

        # save visualizations
        for k in range(1,1+args.ctxt):
            i = np.where(id == k)[0]
            p = [dsc[j]['roi'] for j in i]
            im2 = enhance_patches(im, p)
            imsave(pfx+'_'+desc_label+'_h_'+str(k)+'.ppm', im2)

        if args.eosine:
            # repeat on E plane:
            print("------------> E plane")
            img_iterator = sliding_window_on_regions(h.shape, ROIs, (args.wsize,args.wsize),
                                                     step=(args.wsize,args.wsize))
            dsc = get_local_desc(e, g, img_iterator, desc_label)

            dst = pdist_gabor(dsc)

            cl = average(dst)
            id = fcluster(cl, t=args.ctxt, criterion='maxclust')  # get the various contexts

            # save clustering/contexts - remember, the coordinates are in the
            # current image system which might have been cropped from the original ->
            # should add back the shift
            z1 = desc_to_matrix(dsc, desc_label)  # col 0: row_min, col 2: col_min
            z1[:, 0] += row_min + dh
            z1[:, 2] += col_min + dw
            z2 = np.matrix(id).transpose()
            z2 = np.hstack( (z2, z1) )
            np.savetxt(pfx+'_'+desc_label+'_e.dat', z2, delimiter="\t")

            # save visualizations
            for k in range(1,1+args.ctxt):
                i = np.where(id == k)[0]
                p = [dsc[j]['roi'] for j in i]
                im2 = enhance_patches(im, p)
                imsave(pfx+'_'+desc_label+'_e_'+str(k)+'.ppm', im2)

        print("OK")

    if args.lbp:
        print("---------> LBP descriptors:")
        g = LBPDescriptor()
        desc_label = 'lbp'

        # on H-plane:
        print("------------> H plane")
        img_iterator = sliding_window_on_regions(h.shape, ROIs, (args.wsize,args.wsize),
                                                 step=(args.wsize,args.wsize))
        dsc = get_local_desc(h, g, img_iterator, desc_label)

        dst = pdist_lbp(dsc)

        cl = average(dst)
        id = fcluster(cl, t=args.ctxt, criterion='maxclust')  # get the various contexts

        # save clustering/contexts - remember, the coordinates are in the
        # current image system which might have been cropped from the original ->
        # should add back the shift
        z1 = desc_to_matrix(dsc, desc_label)  # col 0: row_min, col 2: col_min
        z1[:, 0] += row_min + dh
        z1[:, 2] += col_min + dw
        z2 = np.matrix(id).transpose()
        z2 = np.hstack( (z2, z1) )
        np.savetxt(pfx+'_'+desc_label+'_h.dat', z2, delimiter="\t")

        # save visualizations
        for k in range(1,1+args.ctxt):
            i = np.where(id == k)[0]
            p = [dsc[j]['roi'] for j in i]
            im2 = enhance_patches(im, p)
            imsave(pfx+'_'+desc_label+'_h_'+str(k)+'.ppm', im2)

        if args.eosine:
            # repeat on E plane:
            print("------------> E plane")
            img_iterator = sliding_window_on_regions(h.shape, ROIs, (args.wsize,args.wsize),
                                                     step=(args.wsize,args.wsize))
            dsc = get_local_desc(e, g, img_iterator, desc_label)

            dst = pdist_lbp(dsc)

            cl = average(dst)
            id = fcluster(cl, t=args.ctxt, criterion='maxclust')  # get the various contexts

            # save clustering/contexts - remember, the coordinates are in the
            # current image system which might have been cropped from the original ->
            # should add back the shift
            z1 = desc_to_matrix(dsc, desc_label)  # col 0: row_min, col 2: col_min
            z1[:, 0] += row_min + dh
            z1[:, 2] += col_min + dw
            z2 = np.matrix(id).transpose()
            z2 = np.hstack( (z2, z1) )
            np.savetxt(pfx+'_'+desc_label+'_e.dat', z2, delimiter="\t")

            # save visualizations
            for k in range(1,1+args.ctxt):
                i = np.where(id == k)[0]
                p = [dsc[j]['roi'] for j in i]
                im2 = enhance_patches(im, p)
                imsave(pfx+'_'+desc_label+'_e_'+str(k)+'.ppm', im2)

        print("OK")

    if args.mfs:
        print("---------> MFS descriptors:")
        g = MFSDescriptor()
        desc_label = 'mfs'

        # on H-plane:
        print("------------> H plane")
        img_iterator = sliding_window_on_regions(h.shape, ROIs, (args.wsize,args.wsize),
                                                 step=(args.wsize,args.wsize))
        dsc = get_local_desc(h, g, img_iterator, desc_label)

        dst = pdist_mfs(dsc)

        cl = average(dst)
        id = fcluster(cl, t=args.ctxt, criterion='maxclust')  # get the various contexts

        # save clustering/contexts
        # save clustering/contexts - remember, the coordinates are in the
        # current image system which might have been cropped from the original ->
        # should add back the shift
        z1 = desc_to_matrix(dsc, desc_label)  # col 0: row_min, col 2: col_min
        z1[:, 0] += row_min + dh
        z1[:, 2] += col_min + dw
        z2 = np.matrix(id).transpose()
        z2 = np.hstack( (z2, z1) )
        np.savetxt(pfx+'_'+desc_label+'_h.dat', z2, delimiter="\t")

        # save visualizations
        for k in range(1,1+args.ctxt):
            i = np.where(id == k)[0]
            p = [dsc[j]['roi'] for j in i]
            im2 = enhance_patches(im, p)
            imsave(pfx+'_'+desc_label+'_h_'+str(k)+'.ppm', im2)

        if args.eosine:
            # repeat on E plane:
            print("------------> E plane")
            img_iterator = sliding_window_on_regions(h.shape, ROIs, (args.wsize,args.wsize),
                                                     step=(args.wsize,args.wsize))
            dsc = get_local_desc(e, g, img_iterator, desc_label)

            dst = pdist_mfs(dsc)

            cl = average(dst)
            id = fcluster(cl, t=args.ctxt, criterion='maxclust')  # get the various contexts

            # save clustering/contexts - remember, the coordinates are in the
            # current image system which might have been cropped from the original ->
            # should add back the shift
            z1 = desc_to_matrix(dsc, desc_label)  # col 0: row_min, col 2: col_min
            z1[:, 0] += row_min + dh
            z1[:, 2] += col_min + dw
            z2 = np.matrix(id).transpose()
            z2 = np.hstack( (z2, z1) )
            np.savetxt(pfx+'_'+desc_label+'_e.dat', z2, delimiter="\t")

            # save visualizations
            for k in range(1,1+args.ctxt):
                i = np.where(id == k)[0]
                p = [dsc[j]['roi'] for j in i]
                im2 = enhance_patches(im, p)
                imsave(pfx+'_'+desc_label+'_e_'+str(k)+'.ppm', im2)

        print("OK")

    return
Example #6
def main():
    p = opt.ArgumentParser(description="""
    Assigns the regions of an image to the clusters of a codebook.
    """)
    p.add_argument('image', action='store', help='image file name')
    p.add_argument('config', action='store', help='a configuration file')
    p.add_argument('-r', '--roi', action='store', nargs=4, type=int,
                   help='region of interest from the image as: row_min row_max col_min col_max',
                   default=None)
    args = p.parse_args()
    img_file = args.image
    cfg_file = args.config

    image_orig = skimage.io.imread(img_file)
    if image_orig.ndim == 3:
        im_h, _, _ = rgb2he2(image_orig)
    else:
        im_h = image_orig  # already single channel; use as-is

    if args.roi is None:
        roi = (0, im_h.shape[0]-1, 0, im_h.shape[1]-1)
    else:
        roi = args.roi

    # Process configuration file:
    parser = SafeConfigParser()
    parser.read(cfg_file)

    if not parser.has_section('data'):
        raise RuntimeError('Section [data] is mandatory')
    wsize = (32, 32)
    if parser.has_option('data', 'window_size'):
        wsize = ast.literal_eval(parser.get('data', 'window_size'))

    if not parser.has_option('data', 'model'):
        raise RuntimeError('model file name is missing in [data] section')
    model_file = parser.get('data', 'model')
    with ModelPersistence(model_file, 'r', format='pickle') as mp:
        codebook = mp['codebook']
        Xm = mp['shift']
        Xs = mp['scale']
        standardize = mp['standardize']

    if parser.has_option('data', 'output'):
        out_file = parser.get('data', 'output')
    else:
        out_file = 'output.dat'

    descriptors = read_local_descriptors_cfg(parser)

    # For the moment, it is assumed that only one type of local descriptor is
    # used - no composite feature vectors. This will change in the future but,
    # for now, only the first descriptor in the "descriptors" list is used,
    # and the codebook is assumed to have been constructed with the same one.

    desc = descriptors[0]

    print(img_file)
    print(wsize)
    print(roi[0], roi[1], roi[2], roi[3])


    w_offset = (0, 0)
    if isinstance(desc, HaarLikeDescriptor):
        # this one works on integral images
        image = intg_image(im_h)
        # the sliding window should also be increased by 1:
        w_offset = (1, 1)
        wsize = (wsize[0] + w_offset[0], wsize[1] + w_offset[1])
    else:
        image = im_h

    itw = sliding_window_on_regions(image.shape, [tuple(roi)], wsize, step=wsize)
    wnd = []
    labels = []
    buff_size = 10000                  # every <buff_size> patches we do a classification
    X = np.zeros((buff_size, codebook.cluster_centers_[0].shape[0]))
    k = 0
    if standardize:                    # placed here, to avoid testing inside the loop
        for r in itw:
            # adjust if needed:
            r2 = (r[0], r[1] - w_offset[1], r[2], r[3] - w_offset[0])
            wnd.append(r2)
            X[k,:] = desc.compute(image[r[0]:r[1], r[2]:r[3]])
            k += 1
            if k == buff_size:
                X = (X - Xm) / Xs
                labels.extend(codebook.predict(X).tolist())
                k = 0                      # reset the block
    else:
        for r in itw:
            # adjust if needed:
            r2 = (r[0], r[1] - w_offset[1], r[2], r[3] - w_offset[0])
            wnd.append(r2)
            X[k,:] = desc.compute(image[r[0]:r[1], r[2]:r[3]])
            k += 1
            if k == buff_size:
                labels.extend(codebook.predict(X).tolist())
                k = 0                      # reset the block

    if k != 0:
        # some data is accumulated in X but not yet classified
        if standardize:
            X[0:k, :] = (X[0:k, :] - Xm) / Xs
        labels.extend(codebook.predict(X[0:k, :]).tolist())

    with open(out_file, 'w') as f:
        n = len(wnd)                       # total number of descriptors of this type
        for k in range(n):
            s = '\t'.join([str(x_) for x_ in wnd[k]]) + '\t' + str(labels[k]) + '\n'
            f.write(s)
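The buffering above trades a little memory for far fewer predict() calls. A self-contained sketch of the same pattern, using scikit-learn's KMeans as the classifier (an assumption; any estimator with a predict() method fits):

import numpy as np
from sklearn.cluster import KMeans

km = KMeans(n_clusters=3, n_init=10).fit(np.random.rand(100, 8))
buff_size = 32
X = np.zeros((buff_size, 8))
labels, k = [], 0
for x in np.random.rand(75, 8):    # any stream of feature vectors
    X[k, :] = x
    k += 1
    if k == buff_size:             # full block: classify in one call
        labels.extend(km.predict(X).tolist())
        k = 0
if k != 0:                         # classify the partially filled block
    labels.extend(km.predict(X[0:k, :]).tolist())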
Example #7
def grow_bag_from_new_image(image, desc, w_size, n_obj, **kwargs):
    """
    Extracts local descriptors from a new image.

    :param image: numpy.array
        Image data (single channel).
    :param desc: LocalDescriptor
        Local descriptor for feature extraction.
    :param w_size: tuple
        (width, height) of the sub-windows from the image.
    :param n_obj: int
        Maximum number of objects to be added to the bag.
    :param kwargs: dict
        Other parameters:
        'roi': region of interest (default: None)
        'sampling_strategy': how the image should be sampled:
            'random' for random sampling
            'sliding' for systematic, sliding window scanning
             of the image
        'it_start': where the scanning of the image starts (for
            sliding window sampling strategy) (default (0,0))
        'it_step': step from one window to the next (for
            sliding window sampling strategy) (default (1,1))
        'discard_empty': (boolean) whether an empty patch should still
            be processed or simply discarded. Default: False
    :return: dict
        A dictionary with two elements:
            <name of the descriptor>: list
            'regs': list
        The first list contains the feature descriptors.
        The second list contains the corresponding window positions.

    See also: grow_bag_with_new_features
    """

    if 'roi' not in kwargs:
        roi = None
    else:
        roi = kwargs['roi']

    if 'it_start' not in kwargs:
        it_start = (0, 0)
    else:
        it_start = kwargs['it_start']

    if 'it_step' not in kwargs:
        it_step = (1, 1)
    else:
        it_step = kwargs['it_step']

    if 'sampling_strategy' not in kwargs:
        sampling_strategy = 'random'
    else:
        sampling_strategy = kwargs['sampling_strategy']

    if 'discard_empty' in kwargs:
        discard_empty = kwargs['discard_empty']
    else:
        discard_empty = False

    w_offset = (0, 0)
    if isinstance(desc, HaarLikeDescriptor):
        # this one works on integral images
        image = intg_image(image)
        # the sliding window should also be increased by 1:
        w_offset = (1, 1)
        w_size = (w_size[0] + w_offset[0], w_size[1] + w_offset[1])

    # create iterator:
    sampling_strategy = sampling_strategy.lower()
    if sampling_strategy == 'random':
        if roi is None:
            itw = random_window(image.shape, w_size, n_obj)
        else:
            itw = random_window_on_regions(image.shape, roi, w_size, n_obj)
    elif sampling_strategy == 'sliding':
        if roi is None:
            itw = sliding_window(image.shape,
                                 w_size,
                                 start=it_start,
                                 step=it_step)
        else:
            itw = sliding_window_on_regions(image.shape,
                                            roi,
                                            w_size,
                                            step=it_step)
    else:
        raise ValueError('Unknown strategy.')

    bag = []
    wnd = []
    n = 0

    for r in itw:
        if discard_empty and image[r[0]:r[1], r[2]:r[3]].sum() < 1e-16:
            continue

        # adjust if needed:
        r2 = (r[0], r[1] - w_offset[1], r[2], r[3] - w_offset[0])
        wnd.append(r2)
        bag.append(desc.compute(image[r[0]:r[1], r[2]:r[3]]))

        n += 1
        if n >= n_obj:
            break

    return {desc.name: bag, 'regs': wnd}
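The +1 window offset above is needed because a box sum over a w x h patch of an integral image requires (w+1) x (h+1) samples. A numpy sketch of the standard four-corner lookup (intg_image is assumed to behave like the cumsum-based version below, with a leading zero row and column):

import numpy as np

def intg_image_np(img):
    # integral image with a leading zero row/column
    ii = np.zeros((img.shape[0] + 1, img.shape[1] + 1))
    ii[1:, 1:] = img.cumsum(axis=0).cumsum(axis=1)
    return ii

img = np.arange(16.0).reshape(4, 4)
ii = intg_image_np(img)
# sum over img[1:3, 1:3] from four corner lookups:
r0, r1, c0, c1 = 1, 3, 1, 3
box = ii[r1, c1] - ii[r0, c1] - ii[r1, c0] + ii[r0, c0]
assert box == img[1:3, 1:3].sum()   # 30.0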