Example #1
    def extract_trees(self, outp):
        """ Extracts the merge tree history for each pair of files in self.sp and
        self.mem, storing the result as a .txt file in the outp folder """

        dataio.check_folder(self.sp)
        dataio.check_folder(self.mem)

        sp_files = dataio.FileReader(self.sp).extract_files()
        mem_files = dataio.FileReader(self.mem).extract_files()

        # Iterate through all files and extract history independently
        for (i, j) in zip(sp_files, mem_files):
            file_name = os.path.splitext(os.path.basename(j))[0]
            output_file = os.path.join(outp, file_name + ".txt")
            history = self._extract_specific(i, j, output_file)
            print('Created merge history {}'.format(history))
Example #2
def merge_dataset(folder, histories, thresh, outp):
    """ Thresholds images by merging all supervoxels up to a given threshold 
    :param folder: Folder containing superpixel images
    :param histories: Folder containing the corresponding merge tree histories
    :param thresh: Merge threshold up to which merge regions
    :param outp: Folder where to store resulting images 
    """ 
    dataio.create_dir(outp)
    # Read superpixels and histories
    sps = dataio.FileReader(folder).extract_files()
    hists = dataio.FileReader(histories, exts=['.txt', '.dat', '.data']).extract_files()

    for (s, h) in zip(sps, hists):
        img = merge_superpixels(s, h, thresh)
        name, ext = os.path.splitext(s)
        mh.imsave(os.path.join(outp, os.path.basename(name) + ext), img.astype(float))
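A minimal usage sketch for merge_dataset; the folder paths and the 0.5 threshold below are placeholders, assuming the superpixel images and their merge tree histories line up when both folders are read in sorted order.

# Hypothetical paths and threshold; adjust to the actual dataset layout.
sp_folder = 'data/superpixels'      # superpixel images
hist_folder = 'data/histories'      # matching merge tree histories (.txt/.dat/.data)
out_folder = 'data/merged_050'      # output folder (created by merge_dataset)

# Merge all regions up to the 0.5 threshold and save the resulting images.
merge_dataset(sp_folder, hist_folder, thresh=0.5, outp=out_folder)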
Example #3
    def read(self, num=None):
        """Reads image content from the dataset path. If a HDF5 provided, the group
        where images are stored must be provided. Default image extensions are PNG,
        TIF and TIFF. Others can be provided
            :param num: Number of images to read from the dataset. Set to None for reading all"""

        if not os.path.exists(self.path):
            raise ValueError('Path {} does not exist'.format(self.path))

        if os.path.isfile(self.path):
            if not dataio.valid_volume_path(self.path):
                raise ValueError('Invalid extension for file {}'.format(
                    self.path))
            with h5py.File(self.path, 'r') as f:
                dataset = dataio.get_hf_group(f, self.group)
                if num is None:
                    self.imgs = dataset[()]
                else:
                    if num > dataset.shape[0]:
                        raise ValueError(
                            'Cannot read more images than the ones available')
                    self.imgs = dataset[0:num][:]
                # Previous code loads all images in memory. Fix this for big
                # datasets when memory is limited
        else:
            reader = dataio.FileReader(self.path, self.exts)
            self.imgs = reader.read(num)

        if self.imgs.shape[0] == 0:
            raise ValueError('No data has been read')
Example #4
def evaluate_solution(sp_path, gt, init, end):
    """ Evaluates the corresponding superpixel-groundtruth image pairs in the given
    interval and returns the Adjusted Rand and Variation of Information metrics
        :param sp_path: Superpixel folder
        :param gt: Groundtruth volume from an HDF5 file
        :param init: First section in the interval to evaluate. Set to None for all
        :param end: Last section in the interval to evaluate. Set to None for all
    """
    # Extract superpixel names
    sps_imgs = dataio.FileReader(sp_path).extract_files()

    # Compute interval
    start = 0 if init is None else init
    ending = len(sps_imgs) - 1 if end is None else end
    if ending >= len(sps_imgs):
        # Clamp to the last available section to avoid indexing past the volume
        print('Cannot evaluate further than section %d' % (len(sps_imgs) - 1))
        ending = len(sps_imgs) - 1
    print('Evaluating CRAG in interval (%d-%d)' % (start, ending))

    # Compute average metrics
    r, vs, vm = 0.0, 0.0, 0.0
    # The interval is inclusive of the last section, matching num below
    for i in range(start, ending + 1):
        print('Evaluating image %s, iteration %d' % (sps_imgs[i], i))
        sp = mh.imread(sps_imgs[i])
        r += rand.adapted_rand(sp, gt[i], all_stats=False)
        v = voi.voi(sp, gt[i])
        vs += v[0]
        vm += v[1]
    # Average over evaluated images
    num = ending - start + 1
    return r / float(num), vs / float(num), vm / float(num)
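A hedged usage sketch for evaluate_solution; the paths are placeholders and the 'stack' group name is an assumption (it mirrors the dataset name used in process_folder below), so adjust it to wherever the groundtruth volume actually lives.

import h5py

# Hypothetical HDF5 path and group name for the groundtruth volume.
with h5py.File('data/groundtruth.h5', 'r') as f:
    gt = f['stack'][()]             # load the full groundtruth volume into memory

# Evaluate sections 0 to 9 of the superpixel folder against the groundtruth.
rand_err, voi_split, voi_merge = evaluate_solution('data/superpixels', gt, init=0, end=9)
print('Adapted Rand: %.4f, VOI split: %.4f, VOI merge: %.4f'
      % (rand_err, voi_split, voi_merge))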
Example #5
def _restrict_set(folder, inds, img=True):
    """ Copies the corresponding files in the folder into a 
    temporary location and returns the path
    :param folder: Folder to get images from
    :param inds: Indexes of files to select from folder (indexes of sorted filenames)
    :param img: Whether the files in the folder refer to images or not """
    # Select files according to mode, get subset
    files = dataio.FileReader(folder) if img is True else \
        dataio.FileReader(folder, exts=['.txt', '.dat', '.data'])
    names = files.extract_files()
    subset = names[inds[0]:inds[1]]
    # Copy subset into temporary location
    tmpath = tempfile.mkdtemp()
    for i in subset:
        dst = os.path.join(tmpath, os.path.basename(i))
        shutil.copyfile(i, dst)
    return tmpath
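A short usage sketch for _restrict_set; the folder path is hypothetical, and inds is the (start, end) pair used as slice bounds over the sorted file names, as in the function above.

import shutil

# Copy the first 10 (sorted) images of a hypothetical folder to a temporary location.
tmp = _restrict_set('data/superpixels', inds=(0, 10), img=True)
# ... work on the restricted copy ...
shutil.rmtree(tmp)                  # the caller is responsible for cleaning up the temporary folder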
Example #6
    def process_folder(self, inp):
        print('Processing ' + inp)
        files = dataio.FileReader(inp).read()
        fd, out = tempfile.mkstemp(dir=self.tmp, suffix='.h5')
        self._file_fds.append(fd)
        print('Storing into ' + out)
        # Open in write mode: mkstemp leaves an empty file that is not yet valid HDF5
        with h5py.File(out, 'w') as f:
            f.create_dataset('stack', data=files, compression='gzip')
        return out
Example #7
def load_history_values(path):
    """
    Loads the histories contained in the input folder and returns
    the list of values
    """
    total = []
    hists = dataio.FileReader(path, exts=['.txt', '.dat', '.data']).extract_files()
    for i in hists:
        scores = read_history_scores(i)
        total = total + scores
    return total
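A small usage sketch for load_history_values; the folder path is hypothetical, and inspecting the score range like this is just one possible use (e.g. when picking a merge threshold).

# Hypothetical folder of merge tree histories.
values = load_history_values('data/histories')
if values:
    print('Read %d merge scores (min %.3f, max %.3f)'
          % (len(values), min(values), max(values)))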
Example #8
def evaluate_supervoxels(sp_folder, gt_folder):
    """ Evaluates the segmented images against their groundtruth and provides
    the Adjusted Rand and Variation of Information (VOI) metrics [split, merge]
        Args:
            sp_folder: Path to superpixel folder
            gt_folder: Path to corresponding groundtruth folder
    """
    sp_files = dataio.FileReader(sp_folder).extract_files()
    gt_files = dataio.FileReader(gt_folder).extract_files()

    if len(sp_files) != len(gt_files):
        raise ValueError('Both folders must contain the same number '
                         'of images and have a proper ordering')

    r, vs, vm = 0.0, 0.0, 0.0
    for (s, g) in zip(sp_files, gt_files):
        logging.debug('Evaluating image %s against %s' % (s, g))
        sp, gt = mh.imread(s), mh.imread(g)
        r += rand.adapted_rand(sp, gt, all_stats=False)
        v = voi.voi(sp, gt)
        vs += v[0]
        vm += v[1]
    return r / len(sp_files), [vs / len(sp_files), vm / len(sp_files)]
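A brief usage sketch for evaluate_supervoxels; the folder paths are placeholders, assuming both folders contain the same number of images in matching sorted order, as the function requires.

# Hypothetical folders with matching, equally sized image sets.
rand_err, (voi_split, voi_merge) = evaluate_supervoxels('data/superpixels', 'data/groundtruth')
print('Adapted Rand: %.4f, VOI: [split %.4f, merge %.4f]' % (rand_err, voi_split, voi_merge))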
Example #9
def load_histories(path): 
    """
    Loads the histories contained in the input folder
    """
    hists = dataio.FileReader(path, exts=['.txt', '.dat', '.data']).extract_files()
    return [read_history(i) for i in hists]
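A one-line usage sketch for load_histories; the folder path is hypothetical.

# Hypothetical folder; one parsed history is returned per .txt/.dat/.data file found.
histories = load_histories('data/histories')
print('Loaded %d merge histories' % len(histories))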