Example #1
def run_channel_extraction(ds, images=[], colorspace='rgb',
                           methods=['gabor', 'gaussian', 'sobel'],
                           methods_params=None, overwrite=False,
                           cfg=None):
    logger.info('Channel extraction started...')

    stats = [{'max': 0., 'min': 255.}
             for i in range(channels.get_number_channels(methods_params=methods_params,
                                                         methods=methods))]

    for i, im in iterate_images(ds, images, overwrite, ['channels']):

        img = filesys.read_image_file(ds, im)

    # img enters as [i, j, rgb]; add_channels converts it to
    # [i, j, channels]: gray, r, g, b, followed by the extra
    # filter channels (e.g. gabor, sigmadiff)
        img = channels.add_channels(
            img, colorspace, methods=methods,
            methods_params=methods_params)
        filesys.write_export_file(ds, im, 'channels', img)

        # track the global min/max of each extra channel across all
        # images (channels 0-3 are gray, r, g and b)
        stats = [{'max': np.max([stats[j]['max'], img[:,:,j+4].max()]),
                  'min': np.min([stats[j]['min'], img[:,:,j+4].min()])}
                 for j in range(img.shape[-1] - 4)]
    
    filesys.write_log_file(ds, {'channelstats': stats})

    logger.info('Channel extraction finished.')
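
A minimal usage sketch for run_channel_extraction; the dataset name 'mydataset' is a placeholder and filesys.get_image_list is reused as it appears in the later examples:

# Hypothetical driver code; 'mydataset' is a placeholder dataset name.
images = filesys.get_image_list('mydataset')
run_channel_extraction('mydataset', images=images, colorspace='rgb',
                       methods=['gabor', 'gaussian', 'sobel'],
                       overwrite=True)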
Example #2
def run_feature_extraction(ds, images=[], feature_blocks=[],
                           colorspace='rgb', model_dataset=None,
                           overwrite=False, image_slice=1,
                           blocks_params={}, cfg=None):

    logger.info('Feature extraction started...')

    # create feature block list
    feature_blocks = create_feature_list(feature_blocks)

    for i, im in iterate_images(ds, images, overwrite,
                                ['features.%s' % re.sub('^extract_blocks_', '', k)
                                 for k in feature_blocks.keys()]):

        segments = filesys.read_export_file(ds, im, 'segments')

        if segments is None:
            logger.warning(
                'No segmentation found for image: %s' % im)
            continue

        meta = filesys.read_export_file(ds, im, 'meta')
        if meta['superpixel_grid_error']:
            logger.warning(
                'Invalid segmentation found for image: %s' % im)
            continue

        # load image, preferring normalized channels over raw channels
        # over the original image file
        img = filesys.read_export_file(ds, im, 'channels.normalized')
        if img is None:
            img = filesys.read_export_file(ds, im, 'channels')
        if img is None:
            img = filesys.read_image_file(ds, im)

        # extract features, optionally subsampling the image and
        # segmentation grid by image_slice to speed things up
        features, features_in_block = \
            cls.features.blocks.extract_blocks(img[::image_slice,::image_slice,:],
                                               segments[::image_slice,::image_slice],
                                               colorspace=colorspace,
                                               blocks=feature_blocks,
                                               blocks_params=blocks_params)

        # remove too large features
        features = cls.features.remove_large_features(features)

        # write features to disk
        filesys.write_feature_files(
            ds, im, features, features_in_block)

        meta = {
            'last feature extraction': time.strftime('%d-%b-%Y %H:%M')}
        filesys.write_export_file(ds, im, 'meta', meta, append=True)

    logger.info('Feature extraction finished.')
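
A minimal usage sketch for run_feature_extraction, assuming segmentation has already been run so that 'segments' and 'meta' exports exist; the dataset name is a placeholder:

# Hypothetical driver code; image_slice=2 subsamples every other pixel.
images = filesys.get_image_list('mydataset')
run_feature_extraction('mydataset', images=images,
                       feature_blocks='all', colorspace='rgb',
                       image_slice=2)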
Example #3
def run_prediction(ds, images='all', model=None,
                   model_dataset=None, colorspace='rgb',
                   feature_blocks='all', overwrite=False, cfg=None):

    if model_dataset is None:
        model_dataset = ds

    if model is None:
        model = filesys.get_model_list(model_dataset)[-1]
    if isinstance(model, str):
        logger.info('Using model %s' % model)
        model = filesys.read_model_file(model_dataset, model)[0]
    if not hasattr(model, 'predict'):
        raise TypeError('Invalid model input type')

    # create image list
    images = create_image_list(ds, images)

    # create block list
    blocks = create_feature_list(feature_blocks)

    # read feature data
    X = get_data(ds,
                 images,
                 feature_blocks=blocks)[0]

    for i, im in iterate_images(ds, images, overwrite, 'predict'):
        if X[i] is None:
            continue

        shp = filesys.read_export_file(
            ds, im, 'meta')['superpixel_grid']
        X[i] = np.asarray(X[i]).reshape((shp[0], shp[1], -1))

        # run prediction
        try:
            classes = model.predict([X[i]])[0]
        except Exception:
            logger.error('Error predicting %s' % im)
            continue

        # save raw data
        filesys.write_export_file(ds, im, 'predict', classes)

        # save plot: tint each superpixel with a color channel offset
        # according to its predicted class
        img = filesys.read_image_file(ds, im)
        seg = filesys.read_export_file(ds, im, 'segments')
        class_list = list(set(classes.flatten()))
        for j, c in enumerate(classes.flatten()):
            ix = class_list.index(c)
            img[seg == j, ix - 1] += .1
        img = np.minimum(1., img) * 255.
        fdir, fname = os.path.split(im)
        # ensure the output directory exists; cv2.imwrite fails silently
        # when it does not
        pdir = os.path.join(fdir, 'predictions')
        if not os.path.exists(pdir):
            os.makedirs(pdir)
        cv2.imwrite(os.path.join(
            pdir, os.path.splitext(fname)[0] + '.classes.png'), img)
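
A minimal usage sketch for run_prediction; with model=None the most recent model in the dataset is used, and a string model name is resolved through filesys.read_model_file. The dataset name is a placeholder:

# Hypothetical driver code; falls back to the latest trained model.
run_prediction('mydataset', images='all', model=None,
               feature_blocks='all', overwrite=True)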
Example #4
def run_feature_normalization(ds, images=[], feature_blocks=[],
                              model_dataset=None, feature_stats=None,
                              overwrite=False, cfg=None):
    logger.info('Normalizing features started...')

    if not overwrite:
        feature_stats = filesys.read_log_file(ds, 'stats')
        
    if feature_stats is None:
        logger.info('Aggregate feature statistics')
    
        if model_dataset is not None and model_dataset != ds:
            images_model = filesys.get_image_list(model_dataset)
        else:
            images_model = images
            
        allstats = []
        for im in images_model:
            meta = filesys.read_export_file(ds, im, 'meta')
            if 'stats' in meta:
                allstats.append(meta['stats'])

        feature_stats = \
            cls.features.normalize.aggregate_feature_stats(allstats)
        log = {'stats': feature_stats,
               'last stats computation': time.strftime('%d-%b-%Y %H:%M')}
        filesys.write_log_file(ds, log)

    # create feature block list
    feature_blocks = create_feature_list(feature_blocks)

    for i, im in iterate_images(ds, images, overwrite,
                                ['features.normalized.%s' % re.sub('^extract_blocks_', '', k)
                                 for k in feature_blocks.keys()]):

        feature_stats = filesys.read_log_file(ds, keys='stats')

        features, features_in_block = filesys.read_feature_files(
            ds, im, list(feature_blocks.keys()) + ['relloc'], ext='linear')

        if features is not None:
            features = cls.features.normalize.normalize_features(
                features, feature_stats)
            filesys.write_feature_files(
                ds, im, features, features_in_block, ext='normalized')

            meta = {'last normalized': time.strftime('%d-%b-%Y %H:%M')}
            filesys.write_export_file(ds, im, 'meta', meta, append=True)

    logger.info('Normalizing features finished.')
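
A minimal usage sketch for run_feature_normalization; with overwrite=False the stats from a previous run are read back from the log, otherwise they are re-aggregated from the per-image metadata (or from model_dataset when given). The dataset name is a placeholder:

# Hypothetical driver code.
images = filesys.get_image_list('mydataset')
run_feature_normalization('mydataset', images=images,
                          feature_blocks='all', overwrite=False)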
Example #5
def run_relative_location_mapping(ds, n=100, sigma=2,
                                  class_aggregation=None, cfg=None):
    logger.info('Computing relative location maps started...')

    # get image list
    images = filesys.get_image_list(ds)

    # loop over training samples
    maplist = []
    for i, im in iterate_images(ds, images):

        if not filesys.is_classified(ds, im):
            logger.warning(
                'Image %s not annotated, skipped' % im)
            continue

        annotations = filesys.read_export_file(ds, im, 'classes')
        meta = filesys.read_export_file(ds, im, 'meta')
        nx, ny = meta['superpixel_grid']
        nm, nn = meta['image_resolution_cropped'][:-1]

        if len(annotations) != nx * ny:
            logger.warning(
                'Size mismatch for image %s, skipped' % im)
            continue

        centroids = filesys.read_feature_files(
            ds, im, ['pixel'])[0].loc[:, 'centroid']
        annotations = cls.utils.aggregate_classes(
            np.asarray(annotations), class_aggregation)

        maplist.append(relativelocation.compute_prior(annotations,
                                                      centroids,
                                                      (nm, nn),
                                                      (nx, ny),
                                                      n=n))

    maps = relativelocation.aggregate_maps(maplist)
    maps = relativelocation.smooth_maps(maps, sigma=sigma)
    maps = relativelocation.panel_to_dict(maps)

    filesys.write_export_file(ds, None, 'relative_location_maps', maps)

    log = {'last relative location prior computation':
           time.strftime('%d-%b-%Y %H:%M')}
    filesys.write_log_file(ds, log)

    logger.info('Computing relative location maps finished.')
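
A minimal usage sketch for run_relative_location_mapping; the class_aggregation mapping shown is a placeholder, since its exact structure depends on cls.utils.aggregate_classes:

# Hypothetical driver code; the aggregation mapping is a placeholder.
class_aggregation = {'vegetation': ['grass', 'shrub'],
                     'water': ['sea', 'channel']}
run_relative_location_mapping('mydataset', n=100, sigma=2,
                              class_aggregation=class_aggregation)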
Example #6
def run_segmentation(ds, images=[], method='slic', method_params={},
                     extract_contours=False, remove_disjoint=True,
                     overwrite=False, cfg=None, roi=None):

    logger.info('Segmentation started...')

    for i, im in iterate_images(ds, images, overwrite=overwrite, ext='segments'):

        img = filesys.read_image_file(ds, im)

        if roi is not None:
            # Temporary hack: link the camera number in the file name to
            # the corresponding region of interest
            cnum = int(re.findall(r'(?<=\.c)[0-9](?=\.)', im)[0])
            roi = filesys.read_roi_file(ds, iroi=cnum - 2)

        segments, contours = seg.superpixels.get_segmentation(
            img, method=method, method_params=method_params,
            extract_contours=extract_contours,
            remove_disjoint=remove_disjoint, roi=roi)

        nx, ny = seg.superpixels.get_superpixel_grid(
            segments, img.shape[:2])
        err = not seg.superpixels.check_segmentation(segments, nx, ny)

        meta = {'image_resolution_cropped': img.shape,
                'superpixel_grid': (nx, ny),
                'superpixel_grid_error': err,
                'last_segmented': time.strftime('%d-%b-%Y %H:%M')}

        filesys.write_export_file(ds, im, 'meta', meta, append=True)
        filesys.write_export_file(ds, im, 'segments', segments)
        filesys.write_export_file(ds, im, 'contours', contours)

    logger.info('Segmentation finished.')
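
A minimal usage sketch for run_segmentation; the method_params shown follow the usual SLIC superpixel parameters and are placeholders for whatever seg.superpixels.get_segmentation actually accepts:

# Hypothetical driver code; SLIC-style parameters are placeholders.
images = filesys.get_image_list('mydataset')
run_segmentation('mydataset', images=images, method='slic',
                 method_params={'n_segments': 600, 'compactness': 10},
                 remove_disjoint=True)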
Example #7
def run_channel_normalization(ds, images=[], model_dataset=None,
                              methods=['gabor', 'gaussian', 'sobel'],
                              methods_params=None, overwrite=False,
                              cfg=None):

    logger.info('Channel normalization started...')

    stats = filesys.read_log_file(
        model_dataset if model_dataset is not None else ds,
        'channelstats')
    if not stats:
        logger.info(
            'Using theoretical channel boundaries for normalization.')
        stats = channels.get_channel_bounds(methods=methods, methods_params=methods_params)

    for i, im in iterate_images(ds, images, overwrite, 'channels.normalized'):
        if filesys.check_export_file(ds, im, 'channels'):
            img = filesys.read_export_file(ds, im, 'channels')
            for j in range(4, img.shape[-1]):
                # channels 0-3 are gray, r, g, b; normalize each extra
                # channel with its own stats entry, indexed by channel j
                img[..., j] = channels.normalize_channel(img[..., j],
                                                         stats[j-4])
            filesys.write_export_file(ds, im, 'channels.normalized', img)

    logger.info('Channel normalization finished.')
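
A minimal usage sketch for run_channel_normalization; it reads the 'channelstats' log written by run_channel_extraction (or falls back to theoretical channel bounds) and writes 'channels.normalized' exports. The dataset name is a placeholder:

# Hypothetical driver code.
images = filesys.get_image_list('mydataset')
run_channel_normalization('mydataset', images=images,
                          methods=['gabor', 'gaussian', 'sobel'])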
Example #8
def run_feature_update(ds, images=[], feature_blocks=[],
                       class_aggregation=None,
                       relative_location_prior=False,
                       overwrite=False, cfg=None):
    logger.info('Updating extracted features started...')

    # create feature block list
    feature_blocks = create_feature_list(feature_blocks)

    if relative_location_prior:
        maps = filesys.read_export_file(
            ds, None, 'relative_location_maps')

    for i, im in iterate_images(ds, images, overwrite,
                                ['features.linear.%s' % re.sub('^extract_blocks_', '', k)
                                 for k in feature_blocks.keys()]):

        # load image and features
        img = filesys.read_image_file(ds, im)
        features, features_in_block = filesys.read_feature_files(
            ds, im, list(feature_blocks.keys()))

        if features is None:
            continue

        # include relative location feature if requested
        if relative_location_prior:
            try:
                logger.info('Add relative location votes')

                Iann = filesys.read_export_file(ds, im, 'classes')
                meta = filesys.read_export_file(ds, im, 'meta')
                nx, ny = meta['superpixel_grid']
                nm, nn = meta['image_resolution_cropped'][:-1]
                Iann = np.reshape(Iann, meta['superpixel_grid'])

                centroids = filesys.read_feature_files(
                    ds, im, ['pixel'])[0].loc[:, 'centroid']
                Iann = cls.utils.aggregate_classes(
                    np.asarray(Iann), class_aggregation)

                votes = relativelocation.vote_image(
                    Iann, maps, centroids, (nm, nn))[0]

                features, features_in_block = \
                    relativelocation.add_features(
                        votes, features, features_in_block)
                filesys.write_feature_files(
                    ds, im, features, features_in_block)
            except Exception:
                logger.warning(
                    'Adding relative location votes failed, using zeros')
                features = relativelocation.remove_features(
                    features, maps.keys())
                features_in_block['relloc'] = [
                    'prob_%s' % c for c in maps.keys()]

            meta = {'last relative location voting':
                    time.strftime('%d-%b-%Y %H:%M')}
            filesys.write_export_file(
                ds, im, 'meta', meta, append=True)

        # make features scale invariant
        logger.info('Make features scale invariant')
        features = cls.features.scaleinvariant.scale_features(
            img, features)
        filesys.write_feature_files(
            ds, im, features, features_in_block, ext='invariant')

        # linearize features
        logger.info('Linearize features')
        features = cls.features.linearize(features)
        features_in_block = cls.features.extend_feature_blocks(
            features, features_in_block)
        filesys.write_feature_files(
            ds, im, features, features_in_block, ext='linear')

        # get feature stats for image
        logger.info('Compute feature statistics')
        imstats = cls.features.normalize.compute_feature_stats(features)

        meta = {'stats': imstats,
                'last stats computation': time.strftime('%d-%b-%Y %H:%M')}
        filesys.write_export_file(ds, im, 'meta', meta, append=True)

    logger.info('Updating extracted features finished.')
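
A minimal usage sketch for run_feature_update; relative_location_prior=True requires the maps written by run_relative_location_mapping above. The dataset name is a placeholder:

# Hypothetical driver code.
images = filesys.get_image_list('mydataset')
run_feature_update('mydataset', images=images, feature_blocks='all',
                   relative_location_prior=True)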