Example #1
def grow_bag_with_new_features(image, regions, desc):
    """
    Returns the features corresponding to a list of regions. This
    is usually used for adding new features to an existing bag,
    where the list of regions has been obtained from an iterator.

    :param image: numpy.array
        Image data (single channel).
    :param regions: list
        A list of regions [(row_min, row_max, col_min, col_max),...]
    :param desc: LocalDescriptor
        Descriptor used for extracting the features.
    :return: dict
        A dictionary with two elements:
            <name of the descriptor>: list
            'regs': list
        The first list contains the feature descriptors.
        The second list contains the corresponding window positions
        (identical to the input regions list).

    See also: grow_bag_from_new_image
    """

    w_offset = (0, 0)
    if isinstance(desc, HaarLikeDescriptor):
        # this one works on integral images
        image = intg_image(image)
        # the sliding window should also be increased by 1:
        w_offset = (1, 1)

    bag = []

    for w in regions:
        # extend row_max/col_max by the integral-image offset, if needed:
        r = (w[0], w[1] + w_offset[1], w[2], w[3] + w_offset[0])
        bag.append(desc.compute(image[r[0]:r[1], r[2]:r[3]]))

    return {desc.name: bag, 'regs': regions}
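A minimal usage sketch for the function above, assuming `desc` is an already
constructed LocalDescriptor instance (e.g. a HaarLikeDescriptor) and
`sliding_window` is the window iterator used elsewhere in this package:

import numpy as np

image = np.random.rand(256, 256)                  # single-channel image
# an existing (here: empty) bag built with the same descriptor:
bag = {desc.name: [], 'regs': []}

# the regions typically come from a window iterator:
regions = list(sliding_window(image.shape, (32, 32), start=(0, 0), step=(32, 32)))

new_feats = grow_bag_with_new_features(image, regions, desc)

# grow the bag with the newly computed features:
bag[desc.name].extend(new_feats[desc.name])
bag['regs'].extend(new_feats['regs'])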
Example #2
def grow_bag_from_new_image(image, desc, w_size, n_obj, **kwargs):
    """
    Extracts local descriptors from a new image.

    :param image: numpy.array
        Image data (single channel).
    :param desc: LocalDescriptor
        Local descriptor for feature extraction.
    :param w_size: tuple
        (width, height) of the sub-windows from the image.
    :param n_obj: int
        Maximum number of objects to be added to the bag.
    :param kwargs: dict
        Other parameters:
        'roi': region of interest (default: None)
        'sampling_strategy': how the image should be sampled:
            'random' for random sampling
            'sliding' for systematic, sliding window scanning
             of the image
        'it_start': where the scanning of the image starts (for
            sliding window sampling strategy) (default (0,0))
        'it_step': step from one window to the next (for
            sliding window sampling strategy) (default (1,1))
        'discard_empty': (boolean) whether an empty patch should be
            discarded instead of processed. Default: False
    :return: dict
        A dictionary with two elements:
            <name of the descriptor>: list
            'regs': list
        The first list contains the feature descriptors.
        The second list contains the corresponding window positions.

    See also: grow_bag_with_new_features
    """

    if 'roi' not in kwargs:
        roi = None
    else:
        roi = kwargs['roi']

    if 'it_start' not in kwargs:
        it_start = (0,0)
    else:
        it_start = kwargs['it_start']

    if 'it_step' not in kwargs:
        it_step = (1,1)
    else:
        it_step = kwargs['it_step']

    if 'sampling_strategy' not in kwargs:
        sampling_strategy = 'random'
    else:
        sampling_strategy = kwargs['sampling_strategy']
        
    if 'discard_empty' in kwargs:
        discard_empty = kwargs['discard_empty']
    else:
        discard_empty = False

    w_offset = (0, 0)
    if isinstance(desc, HaarLikeDescriptor):
        # this one works on integral images
        image = intg_image(image)
        # the sliding window should also be increased by 1:
        w_offset = (1, 1)
        w_size = (w_size[0] + w_offset[0], w_size[1] + w_offset[1])

    # create iterator:
    sampling_strategy = sampling_strategy.lower()
    if sampling_strategy == 'random':
        if roi is None:
            itw = random_window(image.shape, w_size, n_obj)
        else:
            itw = random_window_on_regions(image.shape, roi, w_size, n_obj)
    elif sampling_strategy == 'sliding':
        if roi is None:
            itw = sliding_window(image.shape, w_size, start=it_start, step=it_step)
        else:
            itw = sliding_window_on_regions(image.shape, roi, w_size, step=it_step)
    else:
        raise ValueError('Unknown strategy.')

    bag = []
    wnd = []
    n = 0

    for r in itw:
        if discard_empty and image[r[0]:r[1], r[2]:r[3]].sum() < 1e-16:
            continue

        # shrink the stored window back by the integral-image offset, if needed:
        r2 = (r[0], r[1] - w_offset[1], r[2], r[3] - w_offset[0])
        wnd.append(r2)
        bag.append(desc.compute(image[r[0]:r[1], r[2]:r[3]]))

        n += 1
        if n >= n_obj:
            # the bag holds at most n_obj objects
            break

    return {desc.name: bag, 'regs': wnd}
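A minimal usage sketch for grow_bag_from_new_image, again assuming `desc` is an
already constructed LocalDescriptor instance from this package:

import numpy as np

image = np.random.rand(512, 512)                  # single-channel image

# randomly sample at most 200 windows of 32x32 pixels:
bag = grow_bag_from_new_image(image, desc, (32, 32), 200,
                              sampling_strategy='random')

# or scan the image with a non-overlapping sliding window, skipping empty patches:
bag = grow_bag_from_new_image(image, desc, (32, 32), 1000,
                              sampling_strategy='sliding',
                              it_step=(32, 32),
                              discard_empty=True)

print(len(bag[desc.name]), len(bag['regs']))      # features and their windows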
Example #3
def main():
    p = opt.ArgumentParser(description="""
    Assigns the regions of an image to the clusters of a codebook.
    """)
    p.add_argument('image', action='store', help='image file name')
    p.add_argument('config', action='store', help='a configuration file')
    p.add_argument(
        '-r',
        '--roi',
        action='store',
        nargs=4,
        type=int,
        help=
        'region of interest from the image as: row_min row_max col_min col_max',
        default=None)
    args = p.parse_args()
    img_file = args.image
    cfg_file = args.config

    image_orig = skimage.io.imread(img_file)
    if image_orig.ndim == 3:
        im_h, _, _ = rgb2he2(image_orig)
    else:
        # assume the image is already single channel and use it as is
        im_h = image_orig

    if args.roi is None:
        roi = (0, im_h.shape[0] - 1, 0, im_h.shape[1] - 1)
    else:
        roi = args.roi

    # Process configuration file:
    parser = SafeConfigParser()
    parser.read(cfg_file)

    if not parser.has_section('data'):
        raise RuntimeError('Section [data] is mandatory')
    wsize = (32, 32)
    if parser.has_option('data', 'window_size'):
        wsize = ast.literal_eval(parser.get('data', 'window_size'))

    if not parser.has_option('data', 'model'):
        raise RuntimeError('model file name is missing in [data] section')
    model_file = parser.get('data', 'model')
    with ModelPersistence(model_file, 'r', format='pickle') as mp:
        codebook = mp['codebook']
        Xm = mp['shift']
        Xs = mp['scale']
        standardize = mp['standardize']

    if parser.has_option('data', 'output'):
        out_file = parser.get('data', 'output')
    else:
        out_file = 'output.dat'

    descriptors = read_local_descriptors_cfg(parser)

    # For the moment, it is assumed that only one type of local descriptor is
    # used - no composite feature vectors. This will change in the future, but
    # for now only the first descriptor in the "descriptors" list is used, and
    # the codebook is assumed to have been constructed with the same one.

    desc = descriptors[0]

    print(img_file)
    print(wsize)
    print(roi[0], roi[1], roi[2], roi[3])

    w_offset = (0, 0)
    if isinstance(desc, HaarLikeDescriptor):
        # this one works on integral images
        image = intg_image(im_h)
        # the sliding window should also be increased by 1:
        w_offset = (1, 1)
        wsize = (wsize[0] + w_offset[0], wsize[1] + w_offset[1])
    else:
        image = im_h

    itw = sliding_window_on_regions(image.shape, [tuple(roi)],
                                    wsize,
                                    step=wsize)
    wnd = []
    labels = []
    buff_size = 10000  # every <buff_size> patches we do a classification
    X = np.zeros((buff_size, codebook.cluster_centers_[0].shape[0]))
    k = 0
    if standardize:  # placed here, to avoid testing inside the loop
        for r in itw:
            # adjust if needed:
            r2 = (r[0], r[1] - w_offset[1], r[2], r[3] - w_offset[0])
            wnd.append(r2)
            X[k, :] = desc.compute(image[r[0]:r[1], r[2]:r[3]])
            k += 1
            if k == buff_size:
                X = (X - Xm) / Xs
                labels.extend(codebook.predict(X).tolist())
                k = 0  # reset the block
    else:
        for r in itw:
            # adjust if needed:
            r2 = (r[0], r[1] - w_offset[1], r[2], r[3] - w_offset[0])
            wnd.append(r2)
            X[k, :] = desc.compute(image[r[0]:r[1], r[2]:r[3]])
            k += 1
            if k == buff_size:
                labels.extend(codebook.predict(X).tolist())
                k = 0  # reset the block

    if k != 0:
        # some data is accumulated in X but not yet classified;
        # only the first k rows hold fresh features
        if standardize:
            X[0:k, :] = (X[0:k, :] - Xm) / Xs
        labels.extend(codebook.predict(X[0:k, :]).tolist())

    with open(out_file, 'w') as f:
        n = len(wnd)  # total number of descriptors of this type
        for k in range(n):
            s = '\t'.join([str(x_)
                           for x_ in wnd[k]]) + '\t' + str(labels[k]) + '\n'
            f.write(s)
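For reference, a minimal configuration file for this script could look like the
sketch below. Only the [data] options actually read by main() are shown, the
values are placeholders, and the local-descriptor options expected by
read_local_descriptors_cfg() must be added in whatever format that function
defines. The model file is expected to be readable by ModelPersistence and to
contain the 'codebook', 'shift', 'scale' and 'standardize' entries used above.

# write a minimal, hypothetical config file for the script above:
cfg_text = """
[data]
window_size = (32, 32)
model = model.pkl
output = labels.dat
"""

with open('assign_regions.cfg', 'w') as f:
    f.write(cfg_text)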
Example #4
def main():
    p = opt.ArgumentParser(description="""
    Assigns the regions of an image to the clusters of a codebook.
    """)
    p.add_argument('image', action='store', help='image file name')
    p.add_argument('config', action='store', help='a configuration file')
    p.add_argument('-r', '--roi', action='store', nargs=4, type=int,
                   help='region of interest from the image as: row_min row_max col_min col_max',
                   default=None)
    args = p.parse_args()
    img_file = args.image
    cfg_file = args.config

    image_orig = skimage.io.imread(img_file)
    if image_orig.ndim == 3:
        im_h, _, _ = rgb2he2(image_orig)
    else:
        # assume the image is already single channel and use it as is
        im_h = image_orig

    if args.roi is None:
        roi = (0, im_h.shape[0]-1, 0, im_h.shape[1]-1)
    else:
        roi = args.roi

    # Process configuration file:
    parser = SafeConfigParser()
    parser.read(cfg_file)

    if not parser.has_section('data'):
        raise RuntimeError('Section [data] is mandatory')
    wsize = (32, 32)
    if parser.has_option('data', 'window_size'):
        wsize = ast.literal_eval(parser.get('data', 'window_size'))

    if not parser.has_option('data', 'model'):
        raise RuntimeError('model file name is missing in [data] section')
    model_file = parser.get('data', 'model')
    with ModelPersistence(model_file, 'r', format='pickle') as mp:
        codebook = mp['codebook']
        Xm = mp['shift']
        Xs = mp['scale']
        standardize = mp['standardize']

    if parser.has_option('data', 'output'):
        out_file = parser.get('data', 'output')
    else:
        out_file = 'output.dat'

    descriptors = read_local_descriptors_cfg(parser)

    # For the moment, it is assumed that only one type of local descriptor is
    # used - no composite feature vectors. This will change in the future, but
    # for now only the first descriptor in the "descriptors" list is used, and
    # the codebook is assumed to have been constructed with the same one.

    desc = descriptors[0]

    print(img_file)
    print(wsize)
    print(roi[0], roi[1], roi[2], roi[3])


    w_offset = (0, 0)
    if isinstance(desc, HaarLikeDescriptor):
        # this one works on integral images
        image = intg_image(im_h)
        # the sliding window should also be increased by 1:
        w_offset = (1, 1)
        wsize = (wsize[0] + w_offset[0], wsize[1] + w_offset[1])
    else:
        image = im_h

    itw = sliding_window_on_regions(image.shape, [tuple(roi)], wsize, step=wsize)
    wnd = []
    labels = []
    buff_size = 10000                  # every <buff_size> patches we do a classification
    X = np.zeros((buff_size, codebook.cluster_centers_[0].shape[0]))
    k = 0
    if standardize:                    # placed here, to avoid testing inside the loop
        for r in itw:
            # adjust if needed:
            r2 = (r[0], r[1] - w_offset[1], r[2], r[3] - w_offset[0])
            wnd.append(r2)
            X[k,:] = desc.compute(image[r[0]:r[1], r[2]:r[3]])
            k += 1
            if k == buff_size:
                X = (X - Xm) / Xs
                labels.extend(codebook.predict(X).tolist())
                k = 0                      # reset the block
    else:
        for r in itw:
            # adjust if needed:
            r2 = (r[0], r[1] - w_offset[1], r[2], r[3] - w_offset[0])
            wnd.append(r2)
            X[k,:] = desc.compute(image[r[0]:r[1], r[2]:r[3]])
            k += 1
            if k == buff_size:
                labels.extend(codebook.predict(X).tolist())
                k = 0                      # reset the block

    if k != 0:
        # some data is accumulated in X but not yet classified;
        # only the first k rows hold fresh features
        if standardize:
            X[0:k, :] = (X[0:k, :] - Xm) / Xs
        labels.extend(codebook.predict(X[0:k, :]).tolist())

    with open(out_file, 'w') as f:
        n = len(wnd)                       # total number of descriptors of this type
        for k in range(n):
            s = '\t'.join([str(x_) for x_ in wnd[k]]) + '\t' + str(labels[k]) + '\n'
            f.write(s)