Example #1
def split_in_xyz(ipl):

    ipl.logging('Datastructure\n---\n{}', ipl.datastructure2string())

    reskeys = ('0', '1')
    ipl_split = IPL()
    ipl_split['z'] = ipl.anytask(lib.split,
                                 2,
                                 axis=0,
                                 result_keys=reskeys,
                                 return_only=True,
                                 rtrntype=IPL)
    ipl_split['y'] = ipl.anytask(lib.split,
                                 2,
                                 axis=1,
                                 result_keys=reskeys,
                                 return_only=True,
                                 rtrntype=IPL)
    ipl_split['x'] = ipl.anytask(lib.split,
                                 2,
                                 axis=2,
                                 result_keys=reskeys,
                                 return_only=True,
                                 rtrntype=IPL)

    ipl_split = ipl_split.switch_levels(1, 2)
    ipl.logging('Split sample datastructure\n---\n{}',
                ipl_split.datastructure2string())

    return ipl_split
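
As a rough illustration, the following numpy-only sketch shows what the split along one axis presumably amounts to for a single volume; np.array_split is a stand-in assumption for lib.split, and the IPL/anytask plumbing that applies it per contained image is taken as given.

import numpy as np

volume = np.arange(4 * 4 * 4).reshape(4, 4, 4)

# Split into two halves along z (axis 0), keyed '0' and '1' like reskeys above
halves = dict(zip(('0', '1'), np.array_split(volume, 2, axis=0)))

print(halves['0'].shape)  # (2, 4, 4)
print(halves['1'].shape)  # (2, 4, 4)
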
Example #2
def compute_faces(ipl, key):

    faces = IPL(data={key: ipl[key]})
    shp = faces[key].shape
    ipl.logging('Computing faces ...')
    faces.get_faces_with_neighbors(keys=key, rtrntype=IPL)

    # startpoints = ipl['faces', keys[0]].keys()
    additionalinfo = IPL()
    startpoints = IPL(data={'xyf': shp[2],
                            'xyb': shp[2],
                            'xzf': shp[1],
                            'xzb': shp[1],
                            'yzf': shp[0],
                            'yzb': shp[0]})
    areas = IPL(data={'xyf': np.s_[shp[2]:shp[2] + shp[0], shp[2]:shp[2] + shp[1]],
                      'xyb': np.s_[shp[2]:shp[2] + shp[0], shp[2]:shp[2] + shp[1]],
                      'xzf': np.s_[shp[1]:shp[1] + shp[0], shp[1]:shp[1] + shp[2]],
                      'xzb': np.s_[shp[1]:shp[1] + shp[0], shp[1]:shp[1] + shp[2]],
                      'yzf': np.s_[shp[0]:shp[0] + shp[1], shp[0]:shp[0] + shp[2]],
                      'yzb': np.s_[shp[0]:shp[0] + shp[1], shp[0]:shp[0] + shp[2]]})

    additionalinfo[key, 'startpoints'] = startpoints
    additionalinfo[key, 'areas'] = areas

    return (faces, additionalinfo)
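
The np.s_ entries stored in areas are plain slice objects that can be kept in a container and applied later. A minimal sketch, assuming each face image is padded by the volume's extent along the remaining axis; the shapes here are invented, not the real IPL data.

import numpy as np

shp = (10, 20, 30)  # hypothetical (z, y, x) volume shape

# An xy face padded by shp[2] on every side, as the 'xyf' slice suggests
face = np.zeros((shp[0] + 2 * shp[2], shp[1] + 2 * shp[2]))

area = np.s_[shp[2]:shp[2] + shp[0], shp[2]:shp[2] + shp[1]]
print(face[area].shape)  # (10, 20)
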
def get_features(paths, featureimage, featurelist, max_paths_per_label, hfp=None):

    newfeats = IPL()
    keylist = range(0, max_paths_per_label)
    keylist = [str(x) for x in keylist]
    for i, keys, vals in paths.simultaneous_iterator(max_count_per_item=max_paths_per_label, keylist=keylist):

        if hfp is not None:
            hfp.logging("Working in iteration = {}", i)

        image = np.zeros(featureimage.shape, dtype=np.uint32)

        c = 1
        for curk, curv in (dict(zip(keys, vals))).iteritems():

            curv = lib.swapaxes(curv, 0, 1)
            lib.positions2value(image, curv, c)
            c += 1

        newnewfeats = IPL(
            data=vigra.analysis.extractRegionFeatures(featureimage, image, ignoreLabel=0, features=featurelist)
        )

        for k, v in newnewfeats.iteritems():
            newnewfeats[k] = newnewfeats[k][1:]
            if k in newfeats:
                try:
                    newfeats[k] = np.concatenate((newfeats[k], newnewfeats[k]))
                except ValueError:
                    pass
            else:
                newfeats[k] = newnewfeats[k]

    return newfeats
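
A guess at what lib.positions2value does, based on its call site: write one label value into an image at a list of coordinates (the preceding swapaxes suggests the path arrives with axes and positions transposed). A minimal numpy sketch with made-up coordinates:

import numpy as np

image = np.zeros((5, 5, 5), dtype=np.uint32)
path = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])  # one (z, y, x) per row

# Index with one coordinate array per axis, as numpy fancy indexing expects
image[path[:, 0], path[:, 1], path[:, 2]] = 1

print(image[1, 1, 1])  # 1
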
Example #4
def get_features(paths, featureimages, featurelist, max_paths_per_label, ipl=None):

    newfeats = IPL()

    keylist = range(0, max_paths_per_label)
    keylist = [str(x) for x in keylist]

    # Iterate over all paths, yielding a list of one path per label object until no paths are left
    for i, keys, vals in paths.simultaneous_iterator(
            max_count_per_item=max_paths_per_label,
            keylist=keylist):

        if ipl is not None:
            ipl.logging('Working in iteration = {}', i)
            ipl.logging('Keys: {}', keys)

        if not keys:
            continue

        # Create a working image
        image = np.zeros(np.array(featureimages.yield_an_item()).shape, dtype=np.uint32)
        # And fill it with one path per label object
        c = 1
        for curk, curv in (dict(zip(keys, vals))).iteritems():
            curv = np.array(curv)
            curv = lib.swapaxes(curv, 0, 1)
            lib.positions2value(image, curv, c)
            c += 1

        # TODO: If this loop iterated over the parameter list it would be more broadly applicable
        for d, k, v, kl in featureimages.data_iterator():

            if type(v) is not IPL:

                # Extract the region features of the working image
                # TODO: Extract feature 'Count' manually due to anisotropy
                newnewfeats = IPL(
                    data=vigra.analysis.extractRegionFeatures(
                        np.array(v).astype(np.float32),
                        image, ignoreLabel=0,
                        features=featurelist
                    )
                )
                # Append to the recently computed list of features
                for nk, nv in newnewfeats.iteritems():
                    nv = nv[1:]
                    if newfeats.inkeys(kl+[nk]):
                        try:
                            newfeats[kl + [nk]] = np.concatenate((newfeats[kl + [nk]], nv))
                        except ValueError:
                            pass
                    else:
                        newfeats[kl + [nk]] = nv

    return newfeats
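
A minimal stand-in for the simultaneous_iterator contract as the comment above describes it (yield, per iteration i, the i-th path of every label object until all are exhausted); the real method also honors keylist, which this sketch ignores.

def simultaneous_iterator(items, max_count):
    for i in range(max_count):
        keys = [k for k, v in items.items() if i < len(v)]
        if not keys:
            return
        vals = [items[k][i] for k in keys]
        yield i, keys, vals

paths = {'label_1': ['p0', 'p1'], 'label_2': ['p0']}
for i, keys, vals in simultaneous_iterator(paths, max_count=2):
    print(i, keys, vals)
# 0 ['label_1', 'label_2'] ['p0', 'p0']
# 1 ['label_1'] ['p1']
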
Example #5
def split_in_xyz(ipl):

    ipl.logging('Datastructure\n---\n{}', ipl.datastructure2string())

    reskeys = ('0', '1')
    ipl_split = IPL()
    ipl_split['z'] = ipl.anytask(lib.split, 2, axis=0, result_keys=reskeys, return_only=True, rtrntype=IPL)
    ipl_split['y'] = ipl.anytask(lib.split, 2, axis=1, result_keys=reskeys, return_only=True, rtrntype=IPL)
    ipl_split['x'] = ipl.anytask(lib.split, 2, axis=2, result_keys=reskeys, return_only=True, rtrntype=IPL)

    ipl_split = ipl_split.switch_levels(1, 2)
    ipl.logging('Split sample datastructure\n---\n{}', ipl_split.datastructure2string())

    return ipl_split
def load_images(filepath, skeys=None, recursive_search=False, logger=None):

    if logger is not None:
        logger.logging('Loading data from \n{}', filepath)
    else:
        print 'Loading data from \n{}'.format(filepath)

    data = IPL()

    data.data_from_file(filepath=filepath,
                        skeys=skeys,
                        recursive_search=recursive_search,
                        nodata=True)

    return data
def get_features(paths, featureimage, featurelist, max_paths_per_label, ipl=None):

    newfeats = IPL()

    # TODO: Selection of a limited amount of paths should be random
    keylist = range(0, max_paths_per_label)
    keylist = [str(x) for x in keylist]

    # Iterate over all paths, yielding a list of one path per label object until no paths are left
    for i, keys, vals in paths.simultaneous_iterator(
            max_count_per_item=max_paths_per_label,
            keylist=keylist):

        if ipl is not None:
            ipl.logging('Working in iteration = {}', i)

        # Create a working image
        image = np.zeros(featureimage.shape, dtype=np.uint32)
        # And fill it with one path per label object
        c = 1
        for curk, curv in (dict(zip(keys, vals))).iteritems():
            curv = lib.swapaxes(curv, 0, 1)
            lib.positions2value(image, curv, c)
            c += 1

        # Extract the region features of the working image
        newnewfeats = IPL(
            data=vigra.analysis.extractRegionFeatures(
                featureimage,
                image, ignoreLabel=0,
                features=featurelist
            )
        )

        for k, v in newnewfeats.iteritems():
            newnewfeats[k] = newnewfeats[k][1:]
            if k in newfeats:
                try:
                    newfeats[k] = np.concatenate((newfeats[k], newnewfeats[k]))
                except ValueError:
                    pass
            else:
                newfeats[k] = newnewfeats[k]

    return newfeats
def merge_adjacent_objects_image_iteration(ipl):

    params = ipl.get_params()
    thisparams = params['merge_adjacent_objects']

    merged = IPL()

    for d, k, v, kl in ipl.data_iterator(yield_short_kl=True):

        ipl.logging('Working on image: {}', kl + [k])

        if k == params['largeobjname']:

            data = IPL(data={k: v})
            data.setlogger(ipl.getlogger())
            merged[kl] = merge_adjacent_objects(data, k, thisparams)

    return merged
def merge_adjacent_objects_image_iteration(ipl):

    params = ipl.get_params()
    thisparams = params['merge_adjacent_objects']

    merged = IPL()

    for d, k, v, kl in ipl.data_iterator(yield_short_kl=True):

        ipl.logging('Working on image: {}', kl + [k])

        if k == params['largeobjname']:

            data = IPL(data={k: v})
            data.setlogger(ipl.getlogger())
            merged[kl] = merge_adjacent_objects(data, k, thisparams)

    return merged
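
A minimal stand-in for data_iterator(yield_short_kl=True) as used above, assuming it walks a nested mapping and yields depth, leaf key, value, and the key list of the enclosing groups (the "short" key list, without the leaf key itself):

def data_iterator(d, kl=None, depth=0):
    kl = kl or []
    for k, v in d.items():
        if isinstance(v, dict):
            for item in data_iterator(v, kl + [k], depth + 1):
                yield item
        else:
            yield depth, k, v, kl

tree = {'sample_a': {'labels': 1}, 'sample_b': {'labels': 2}}
for d, k, v, kl in data_iterator(tree):
    print(d, k, v, kl)  # e.g. 1 labels 1 ['sample_a']
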
Example #10
def get_features(paths,
                 featureimage,
                 featurelist,
                 max_paths_per_label,
                 hfp=None):

    newfeats = IPL()
    keylist = range(0, max_paths_per_label)
    keylist = [str(x) for x in keylist]
    for i, keys, vals in paths.simultaneous_iterator(
            max_count_per_item=max_paths_per_label, keylist=keylist):

        if hfp is not None:
            hfp.logging('Working in iteration = {}', i)

        image = np.zeros(featureimage.shape, dtype=np.uint32)

        c = 1
        for curk, curv in (dict(zip(keys, vals))).iteritems():

            curv = lib.swapaxes(curv, 0, 1)
            lib.positions2value(image, curv, c)
            c += 1

        newnewfeats = IPL(data=vigra.analysis.extractRegionFeatures(
            featureimage, image, ignoreLabel=0, features=featurelist))

        for k, v in newnewfeats.iteritems():
            newnewfeats[k] = newnewfeats[k][1:]
            if k in newfeats:
                try:
                    newfeats[k] = np.concatenate((newfeats[k], newnewfeats[k]))
                except ValueError:
                    pass
            else:
                newfeats[k] = newnewfeats[k]

    return newfeats
def load_images(ipl):
    """
    These images are loaded:
    paths_true (paths within single label objects)
    paths_false (paths of merged objects which cross the merging site)
    featureims_true
    featureims_false
    :param ipl:
    :return:
    """
    paths_true = IPL()
    paths_false = IPL()
    featureims = IPL()

    params = ipl.get_params()

    ipl.logging('Loading true paths from:\n{} ...', params['intermedfolder'] + params['pathsfile'])
    # Paths within labels (true paths)
    paths_true.data_from_file(
        filepath=params['intermedfolder'] + params['pathsfile'],
        skeys='truepaths',
        recursive_search=True, nodata=True
    )

    ipl.logging('Loading false paths from:\n{} ...', params['intermedfolder'] + params['pathsfile'])
    # Paths of merges (false paths)
    paths_false.data_from_file(
        filepath=params['intermedfolder'] + params['pathsfile'],
        skeys='falsepaths',
        recursive_search=True, nodata=True
    )

    ipl.logging('Loading feature images from:\n{} ...', params['intermedfolder'] + params['featureimsfile'])
    # Load the feature images (for both true and false paths)
    featureims.data_from_file(
        filepath=params['intermedfolder'] + params['featureimsfile'],
        nodata=True
    )

    return (paths_true, paths_false, featureims)
Example #12
def run_random_forest(yamlfile, logging=True, make_only_feature_array=False, debug=False, write=True):

    ipl = IPL(yaml=yamlfile)

    ipl.set_indent(1)

    params = rdict(data=ipl.get_params())
    if logging:
        ipl.startlogger(filename=params['resultfolder'] + 'random_forest.log', type='w', name='RandomForest')
    else:
        ipl.startlogger()

    try:

        # # Copy the script file and the parameters to the scriptsfolder
        # copy(inspect.stack()[0][1], params['scriptsfolder'])
        # copy(yamlfile, params['scriptsfolder'] + 'random_forest.parameters.yml')

        # ipl.logging('\nInitial datastructure: \n\n{}', ipl.datastructure2string(maxdepth=3))

        if make_only_feature_array:
            make_feature_arrays(ipl)
        else:
            result = IPL()
            result['result'], result['evaluation'] = random_forest(ipl, debug=debug)

            # ipl.logging('\nFinal datastructure: \n\n{}', ipl.datastructure2string(maxdepth=3))

            if write:
                result.write(filepath=params['resultfolder'] + params['resultsfile'])

        ipl.logging('')
        ipl.stoplogger()

    except:
        ipl.errout('Unexpected error')
Example #13
                           keys=locmaxnames)

    # Local maxima
    hfp.logging('Discovering mountains ...')
    hfp.extended_local_maxima(neighborhood=26, keys=locmaxnames)


if __name__ == '__main__':

    yamlfile = os.path.dirname(os.path.abspath(__file__)) + '/parameters.yml'

    hfp = IPL(yaml=yamlfile,
              yamlspec={
                  'path': 'intermedfolder',
                  'filename': 'locmaxborderfile',
                  'skeys': {
                      'locmaxbordernames': (2, 3)
                  }
              },
              tkeys=('disttransf', 'disttransfm'),
              castkey=None)
    params = hfp.get_params()
    thisparams = params['localmax_on_disttransf']
    hfp.startlogger(filename=params['resultfolder'] +
                    'localmax_on_disttransf.log',
                    type='w')

    try:

        # Copy the script file and the parameters to the scriptsfolder
        copy(inspect.stack()[0][1], params['scriptsfolder'])
        copy(yamlfile,
Example #14
if __name__ == "__main__":

    infiles = [
        '/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splA.train.probs.crop.h5',
        '/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splA.train.raw_neurons.crop.h5'
    ]
    outfiles = [
        '/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splA.train.probs.crop.crop_x10_110_y200_712_z200_712.split_xyz.h5',
        '/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splA.train.raw_neurons.crop.crop_x10_110_y200_712_z200_712.split_xyz.h5'
    ]

    for i in xrange(0, len(infiles)):

        ipl = IPL(
            filepath=infiles[i]
        )
        ipl.logging('Datastructure\n---\n{}', ipl.datastructure2string())

        ipl.crop_bounding_rect(bounds=np.s_[10:110, 200:712, 200:712])

        def shape(image):
            return image.shape
        print ipl.datastructure2string(function=shape)

        ipl_split = split_in_xyz(ipl)

        ipl_split.write(filepath=outfiles[i])

    # # Sample A
    # sample_a = IPL(
Example #15
from hdf5_image_processing import Hdf5ImageProcessingLib as IPL
import processing_lib as lib

# Sample A probs
probs_a = IPL(
    filepath=
    '/mnt/localdata01/jhennies/neuraldata/cremi_2016/sample_A_train_betas/sample_A_train_mcseg_beta_0.5.h5'
)

probs_a.logging('Probs A datastructure\n---\n{}',
                probs_a.datastructure2string())

probs_a.anytask(lib.swapaxes, 0, 2)

probs_a.write(
    '/mnt/localdata01/jhennies/neuraldata/cremi_2016/sample_A_train_betas/cremi.splA.train.seg_beta_0.5.crop.h5'
)

reskeys = ('0', '1')
split_probs_a = IPL()
split_probs_a['z'] = probs_a.anytask(lib.split,
                                     2,
                                     axis=0,
                                     result_keys=reskeys,
                                     return_only=True,
                                     rtrntype=IPL)
split_probs_a['y'] = probs_a.anytask(lib.split,
                                     2,
                                     axis=1,
                                     result_keys=reskeys,
                                     return_only=True,
                                     rtrntype=IPL)
    if thisparams['return_bordercontact_images']:
        return bordercontacts
    else:
        return None


if __name__ == '__main__':

    resultsfolder = '/mnt/localdata02/jhennies/neuraldata/results/cremi_2016/161110_random_forest_of_paths/'

    yamlfile = resultsfolder + '/parameters.yml'

    ipl = IPL(
        yaml=yamlfile,
        yamlspec={'path': 'intermedfolder', 'filename': 'largeobjfile', 'skeys': 'largeobjname'},
        recursive_search=True
    )
    params = ipl.get_params()
    thisparams = params['find_border_contacts']
    ipl.data_from_file(params['intermedfolder'] + params['largeobjmfile'],
                       skeys=params['largeobjmnames'][0], recursive_search=True, integrate=True)
    ipl.startlogger(filename=params['resultfolder'] + 'find_border_contacts.log', type='w')

    try:

        # Copy the script file and the parameters to the scriptsfolder
        copy(inspect.stack()[0][1], params['scriptsfolder'])
        copy(yamlfile, params['scriptsfolder'] + 'find_border_contacts.parameters.yml')
        # Write script and parameters to the logfile
        ipl.code2log(inspect.stack()[0][1])
def merge_adjacent_objects(ipl, key, thisparams):
    """
    :param ipl:

    ipl.get_params():

        merge_adjacent_objects
            seed
            numberbysize
            numberbyrandom

        largeobjmnames
            - 'largeobj_merged'
            - 'mergeids_small'
            - 'mergeids_random'
            - 'mergeids_all'
            - 'change_hash'

    :param key: the source key for calculation
    """

    params = ipl.get_params()
    numberbysize = thisparams['numberbysize']
    numberbyrandom = thisparams['numberbyrandom']
    targetnames = params['largeobjmnames']

    # Get only the relevant labels
    labels = lib.unique(ipl[key])
    ipl.logging('labels = {}', labels)

    # Seed the randomize function
    random.seed(thisparams['seed'])

    ipl.astype(np.uint32, keys=key)
    (grag, rag) = graphs.gridRegionAdjacencyGraph(ipl[key], ignoreLabel=0)
    edge_ids = rag.edgeIds()
    # ipl.logging('Edge ids: {}', edge_ids)

    # Type 1:
    # Select edges by size (smallest edges)
    ipl.logging('Number of edgeLengths = {}', len(rag.edgeLengths()))
    edgelen_ids = dict(zip(edge_ids, rag.edgeLengths()))
    # ifp.logging('edgelen_ids = {}', edgelen_ids)
    sorted_edgelens = np.sort(rag.edgeLengths())
    #
    smallest_merge_lens = sorted_edgelens[0:numberbysize]
    ipl.logging('Lengths selected for merging: {}', smallest_merge_lens)
    #
    smallest_merge_ids = []
    for x in smallest_merge_lens:
        edge_id = edgelen_ids.keys()[edgelen_ids.values().index(x)]
        smallest_merge_ids.append(edge_id)
        edgelen_ids.pop(edge_id)
    #
    edge_ids = edgelen_ids.keys()
    ipl.logging('Edge IDs selected for merging due to size: {}',
                smallest_merge_ids)

    # Type 2:
    # Randomly choose edges
    random_merge_ids = random.sample(edge_ids, numberbyrandom)
    ipl.logging('Edge IDs randomly selected for merging: {}', random_merge_ids)

    # Now get the label ids
    smallest_merge_labelids_u = [
        rag.uId(rag.edgeFromId(x)) for x in smallest_merge_ids
    ]
    smallest_merge_labelids_v = [
        rag.vId(rag.edgeFromId(x)) for x in smallest_merge_ids
    ]
    smallest_merge_labelids = list(
        zip(smallest_merge_labelids_u, smallest_merge_labelids_v))
    random_merge_labelids_u = [
        rag.uId(rag.edgeFromId(x)) for x in random_merge_ids
    ]
    random_merge_labelids_v = [
        rag.vId(rag.edgeFromId(x)) for x in random_merge_ids
    ]
    random_merge_labelids = list(
        zip(random_merge_labelids_u, random_merge_labelids_v))
    ipl.logging('Label IDs selected for merging by size: {}',
                smallest_merge_labelids)
    ipl.logging('Label IDs randomly selected for merging: {}',
                random_merge_labelids)

    # Concatenate
    all_merge_labelids = smallest_merge_labelids + random_merge_labelids
    # Sort
    ipl.logging('all_merge_labelids = {}', all_merge_labelids)
    all_merge_labelids = [sorted(x) for x in all_merge_labelids]
    all_merge_labelids = sorted(all_merge_labelids)
    ipl.logging('all_merge_labelids = {}', all_merge_labelids)

    # Store this for later use
    ipl[targetnames[1]] = smallest_merge_labelids
    ipl[targetnames[2]] = random_merge_labelids
    ipl[targetnames[3]] = all_merge_labelids

    # Create change hash list
    change_hash = IPL(data=dict(zip(np.unique(all_merge_labelids),
                                    [[x] for x in np.unique(all_merge_labelids)])))
    # A few fixed passes propagate transitive merges through the hash
    for i in xrange(0, 3):
        prev_change_hash = IPL(data=change_hash)
        for x in all_merge_labelids:
            ipl.logging('Adding {} and {}', *x)
            change_hash[x[0]] += change_hash[x[1]]
            change_hash[x[0]] = list(np.unique(change_hash[x[0]]))
            change_hash[x[1]] += change_hash[x[0]]
            change_hash[x[1]] = list(np.unique(change_hash[x[1]]))
    # This removes the redundancy from the hash
    def reduce_hash(hash):
        br = False
        for k, v in hash.iteritems():
            for x in v:
                if x != k:
                    if x in hash.keys():
                        del hash[x]
                        reduce_hash(hash)
                        br = True
                        break
                    else:
                        br = False
            if br:
                break

    reduce_hash(change_hash)
    # Change the list in the hash to np-arrays for better storage in h5 files
    for k, v in change_hash.iteritems():
        change_hash[k] = np.array(v)
    # And now we have a perfect change list which we just need to iterate over and change the labels in the image
    ipl.logging('change_hash after change:')
    ipl.logging(change_hash)
    ipl[targetnames[4]] = change_hash

    # Create the merged image
    # ipl.deepcopy_entry('largeobj', targetnames[0])
    ipl.rename_entry(key, targetnames[0])
    for k, v in change_hash.iteritems():
        for x in v:
            if x != k:
                ipl.logging('Setting {} to {}!', x, k)
                ipl.filter_values(x, type='eq', setto=k, keys=targetnames[0])

    return ipl
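
The change-hash passes plus reduce_hash above effectively compute connected components over the merge pairs. The same old-to-new label mapping can be sketched with a tiny union-find; all_merge_labelids below is invented toy data, not the real RAG output.

def find(parent, x):
    while parent[x] != x:
        parent[x] = parent[parent[x]]  # path halving
        x = parent[x]
    return x

all_merge_labelids = [(3, 7), (7, 12), (20, 21)]
parent = {}
for u, v in all_merge_labelids:
    parent.setdefault(u, u)
    parent.setdefault(v, v)
    parent[find(parent, u)] = find(parent, v)

# Map every label to its component representative (the merge target)
change = {x: find(parent, x) for x in parent}
print(change)  # {3: 12, 7: 12, 12: 12, 20: 21, 21: 21} (ordering may vary)
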
            #  'disttransfm', thisparams['ignore'])

    return paths


if __name__ == '__main__':

    resultsfolder = '/mnt/localdata02/jhennies/neuraldata/results/cremi_2016/161110_random_forest_of_paths/'

    yamlfile = resultsfolder + '/parameters.yml'

    ipl = IPL(yaml=yamlfile,
              yamlspec={
                  'path': 'intermedfolder',
                  'filename': 'locmaxborderfile',
                  'skeys': {
                      'locmaxbordernames': (1, 3)
                  }
              },
              recursive_search=True)
    params = ipl.get_params()
    thisparams = params['paths_of_partners']
    ipl.startlogger(filename=params['resultfolder'] + 'paths_of_partners.log',
                    type='w')
    ipl.data_from_file(params['intermedfolder'] + params['largeobjfile'],
                       skeys=params['largeobjname'],
                       recursive_search=True,
                       integrate=True)
    ipl.data_from_file(params['intermedfolder'] + params['largeobjmfile'],
                       skeys=(params['largeobjmnames'][0],
                              params['largeobjmnames'][4]),
Example #19
    # cremi = IPL(filepath='/mnt/localdata02/jhennies/neuraldata/cremi_2016/sample_B_20160501.hdf')
    #
    # cremi.logging('Datastructure:\n---\n{}', cremi.datastructure2string())
    #
    # images = IPL(data={
    #     'raw': cremi['volumes', 'raw'],
    #     'neuron_ids': cremi['volumes', 'labels', 'neuron_ids']
    # })
    #
    # images.logging('Datastructure:\n---\n{}', images.datastructure2string())
    #
    # images.write('/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splB.raw_neurons.crop.h5')

    cremi = IPL(
        filepath=
        '/mnt/localdata02/jhennies/neuraldata/cremi_2016/sample_C_20160501.hdf'
    )

    cremi.logging('Datastructure:\n---\n{}', cremi.datastructure2string())

    images = IPL(
        data={
            'raw': cremi['volumes', 'raw'],
            'neuron_ids': cremi['volumes', 'labels', 'neuron_ids']
        })

    images.logging('Datastructure:\n---\n{}', images.datastructure2string())

    images.write(
        '/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splC.raw_neurons.crop.h5'
    )
Example #20
        c += 1

    del (hfp['faces'])

    if thisparams['return_bordercontact_images']:
        return bordercontacts


if __name__ == '__main__':

    yamlfile = os.path.dirname(os.path.abspath(__file__)) + '/parameters.yml'

    hfp = IPL(
        yaml=yamlfile,
        yamlspec={'path': 'intermedfolder', 'filename': 'largeobjfile', 'skeys': 'largeobjname'},
        tkeys='largeobj',
        castkey=None
    )
    params = hfp.get_params()
    thisparams = params['find_border_contacts']
    hfp.data_from_file(params['intermedfolder'] + params['largeobjmfile'],
                       skeys=params['largeobjmnames'][0], tkeys='largeobjm')
    hfp.startlogger(filename=params['resultfolder'] + 'find_orphans.log', type='w')

    try:

        # Copy the script file and the parameters to the scriptsfolder
        copy(inspect.stack()[0][1], params['scriptsfolder'])
        copy(yamlfile, params['scriptsfolder'] + 'find_orphans.parameters.yml')
        # Write script and parameters to the logfile
        hfp.code2log(inspect.stack()[0][1])
Example #21
def run_remove_small_objects(yamlfile):

    ipl = IPL(yaml=yamlfile,
              yamlspec={
                  'path': 'datafolder',
                  'filename': 'labelsfile',
                  'skeys': 'labelsname'
              },
              recursive_search=True,
              nodata=True)

    # Set indentation of the logging
    ipl.set_indent(1)

    params = ipl.get_params()
    ipl.startlogger(filename=params['resultfolder'] +
                    'remove_small_objects.log',
                    type='w',
                    name='RemoveSmallObjects')

    try:

        # # Copy the script file and the parameters to the scriptsfolder
        # copy(inspect.stack()[0][1], params['scriptsfolder'])
        # copy(yamlfile, params['scriptsfolder'] + 'remove_small_objects.parameters.yml')

        ipl.logging('\nipl datastructure: \n\n{}',
                    ipl.datastructure2string(maxdepth=3))

        remove_small_objects(ipl)

        ipl.logging('\nFinal datastructure: \n\n{}',
                    ipl.datastructure2string(maxdepth=3))

        # ipl.write(filepath=params['intermedfolder'] + params['largeobjfile'])

        ipl.logging('')
        ipl.stoplogger()

    except:

        ipl.errout('Unexpected error')
Example #22
def run_random_forest(yamlfile, logging=True, make_only_feature_array=False, debug=False, write=True):

    ipl = IPL(yaml=yamlfile)

    ipl.set_indent(1)

    params = rdict(data=ipl.get_params())
    if logging:
        ipl.startlogger(filename=params['resultfolder'] + 'random_forest.log', type='w', name='RandomForest')
    else:
        ipl.startlogger()

    try:

        # # Copy the script file and the parameters to the scriptsfolder
        # copy(inspect.stack()[0][1], params['scriptsfolder'])
        # copy(yamlfile, params['scriptsfolder'] + 'random_forest.parameters.yml')

        # ipl.logging('\nInitial datastructure: \n\n{}', ipl.datastructure2string(maxdepth=3))

        if make_only_feature_array:
            make_feature_arrays(ipl)
        else:
            result = IPL()
            result['result'], result['evaluation'] = random_forest(ipl, debug=debug)

            # ipl.logging('\nFinal datastructure: \n\n{}', ipl.datastructure2string(maxdepth=3))

            if write:
                result.write(filepath=params['resultfolder'] + params['resultsfile'])

        ipl.logging('')
        ipl.stoplogger()

    except:
        ipl.errout('Unexpected error')
            hfp[key].astype(np.float32) / np.amax(hfp[key]),
            vigra.filters.multiBinaryDilation(hfp[k].astype(np.uint8), 5)
        ])

    return paths


if __name__ == '__main__':

    yamlfile = os.path.dirname(os.path.abspath(__file__)) + '/parameters.yml'

    hfp = IPL(yaml=yamlfile,
              yamlspec={
                  'path': 'intermedfolder',
                  'filename': 'locmaxborderfile',
                  'skeys': {
                      'locmaxbordernames': (0, 2)
                  }
              },
              tkeys=('border_locmax', 'disttransf'),
              castkey=None)
    params = hfp.get_params()
    thisparams = params['paths_within_labels']
    hfp.startlogger(filename=params['resultfolder'] +
                    'paths_within_labels.log',
                    type='w')
    hfp.data_from_file(params['intermedfolder'] + params['largeobjfile'],
                       skeys=params['largeobjname'],
                       tkeys='largeobj')
    hfp.data_from_file(params['intermedfolder'] + params['locmaxfile'],
                       skeys=params['locmaxnames'][0],
                       tkeys='locmax')
def run_compute_feature_images(yamlfile):

    ipl = IPL(yaml=yamlfile)

    ipl.set_indent(1)

    params = rdict(data=ipl.get_params())
    ipl.startlogger(filename=params['resultfolder'] + 'compute_feature_images.log', type='w', name='ComputeFeatureImages')

    try:

        # # Copy the script file and the parameters to the scriptsfolder
        # copy(inspect.stack()[0][1], params['scriptsfolder'])
        # copy(yamlfile, params['scriptsfolder'] + 'compute_feature_images.parameters.yml')

        # ipl.logging('\nInitial datastructure: \n\n{}', ipl.datastructure2string(maxdepth=3))

        compute_feature_images(ipl)

        # ipl.logging('\nFinal datastructure: \n\n{}', ipl.datastructure2string(maxdepth=3))

        # ipl.write(filepath=params['intermedfolder'] + params['largeobjfile'])

        ipl.logging('')
        ipl.stoplogger()

    except:

        ipl.errout('Unexpected error')
                                (params['locmaxbordernames'][0], params['locmaxnames'][0]),
                                params['locmaxbordernames'][2],
                                thisparams, ignore=thisparams['ignore'])

    return paths


if __name__ == '__main__':

    resultsfolder = '/mnt/localdata02/jhennies/neuraldata/results/cremi_2016/161110_random_forest_of_paths/'

    yamlfile = resultsfolder + '/parameters.yml'

    ipl = IPL(
        yaml=yamlfile,
        yamlspec={'path': 'intermedfolder', 'filename': 'largeobjfile', 'skeys': 'largeobjname'},
        recursive_search=True
    )

    params = ipl.get_params()
    thisparams = params['paths_within_labels']
    ipl.startlogger(filename=params['resultfolder'] + 'paths_within_labels.log', type='w')

    ipl.data_from_file(params['intermedfolder'] + params['locmaxfile'],
                       skeys=params['locmaxnames'][0],
                       recursive_search=True,
                       integrate=True)
    ipl.data_from_file(params['intermedfolder'] + params['locmaxborderfile'],
                       skeys=(params['locmaxbordernames'][0], params['locmaxbordernames'][2]),
                       recursive_search=True,
                       integrate=True)
Example #26
# Done: Visualize distance transform along paths of true and false merges
# Done: Also intensity values of raw data
# TODO: And probability map
# Done: Do not forget to save the result for the thesis!


__author__ = 'jhennies'


if __name__ == '__main__':

    yamlfile = os.path.dirname(os.path.abspath(__file__)) + '/parameters.yml'
    hfp = IPL(
        yaml=yamlfile,
        yamlspec={'path': 'intermedfolder', 'filename': 'pathstruefile'},
        castkey=None
    )
    # hfp.logging('datastructure:\n---\n{}', hfp.datastructure2string())
    params = hfp.get_params()

    hfp['true', 'border'] = IPL(data=hfp['largeobj', 'border_locmax', 'path'])
    hfp['true', 'locmax'] = IPL(data=hfp['largeobj', 'locmax', 'path'])
    hfp.pop('largeobj')

    hfp.data_from_file(filepath=params['intermedfolder'] + params['pathsfalsefile'])

    hfp['false', 'border'] = IPL(data=hfp['largeobjm', 'border_locmax_m', 'path'])
    hfp['false', 'locmax'] = IPL(data=hfp['largeobjm', 'locmaxm', 'path'])
    hfp.pop('largeobjm')
def merge_adjacent_objects(hfp):

    params = hfp.get_params()
    thisparams = params['merge_adjacent_objects']

    numberbysize = thisparams['numberbysize']
    numberbyrandom = thisparams['numberbyrandom']
    targetnames = params['largeobjmnames']

    # Get only the relevant labels
    labels = lib.unique(hfp['largeobj'])
    hfp.logging('labels = {}', labels)

    # Seed the randomize function
    random.seed(thisparams['seed'])

    hfp.astype(np.uint32, keys='largeobj')
    (grag, rag) = graphs.gridRegionAdjacencyGraph(hfp['largeobj'], ignoreLabel=0)
    edge_ids = rag.edgeIds()
    # hfp.logging('Edge ids: {}', edge_ids)

    # Type 1:
    # Select edges by size (smallest edges)
    hfp.logging('Number of edgeLengths = {}', len(rag.edgeLengths()))
    edgelen_ids = dict(zip(edge_ids, rag.edgeLengths()))
    # ifp.logging('edgelen_ids = {}', edgelen_ids)
    sorted_edgelens = np.sort(rag.edgeLengths())
    #
    smallest_merge_lens = sorted_edgelens[0:numberbysize]
    hfp.logging('Lengths selected for merging: {}', smallest_merge_lens)
    #
    smallest_merge_ids = []
    for x in smallest_merge_lens:
        edge_id = edgelen_ids.keys()[edgelen_ids.values().index(x)]
        smallest_merge_ids.append(edge_id)
        edgelen_ids.pop(edge_id)
    #
    edge_ids = edgelen_ids.keys()
    hfp.logging('Edge IDs selected for merging due to size: {}', smallest_merge_ids)

    # Type 2:
    # Randomly choose edges
    random_merge_ids = random.sample(edge_ids, numberbyrandom)
    hfp.logging('Edge IDs randomly selected for merging: {}', random_merge_ids)

    # Now get the label ids
    smallest_merge_labelids_u = [rag.uId(rag.edgeFromId(x)) for x in smallest_merge_ids]
    smallest_merge_labelids_v = [rag.vId(rag.edgeFromId(x)) for x in smallest_merge_ids]
    smallest_merge_labelids = list(zip(smallest_merge_labelids_u, smallest_merge_labelids_v))
    random_merge_labelids_u = [rag.uId(rag.edgeFromId(x)) for x in random_merge_ids]
    random_merge_labelids_v = [rag.vId(rag.edgeFromId(x)) for x in random_merge_ids]
    random_merge_labelids = list(zip(random_merge_labelids_u, random_merge_labelids_v))
    hfp.logging('Label IDs selected for merging by size: {}', smallest_merge_labelids)
    hfp.logging('Label IDs randomly selected for merging: {}', random_merge_labelids)

    # Concatenate
    all_merge_labelids = smallest_merge_labelids + random_merge_labelids
    # Sort
    hfp.logging('all_merge_labelids = {}', all_merge_labelids)
    all_merge_labelids = [sorted(x) for x in all_merge_labelids]
    all_merge_labelids = sorted(all_merge_labelids)
    hfp.logging('all_merge_labelids = {}', all_merge_labelids)

    # Store this for later use
    hfp[targetnames[1]] = smallest_merge_labelids
    hfp[targetnames[2]] = random_merge_labelids
    hfp[targetnames[3]] = all_merge_labelids

    # Create change hash list
    change_hash = IPL(data=dict(zip(np.unique(all_merge_labelids), [[x,] for x in np.unique(all_merge_labelids)])))
    for i in xrange(0, 3):
        prev_change_hash = IPL(data=change_hash)
        for x in all_merge_labelids:
            hfp.logging('Adding {} and {}', *x)
            change_hash[x[0]] += change_hash[x[1]]
            change_hash[x[0]] = list(np.unique(change_hash[x[0]]))
            change_hash[x[1]] += change_hash[x[0]]
            change_hash[x[1]] = list(np.unique(change_hash[x[1]]))
    # This removes the redundancy from the hash
    def reduce_hash(hash):
        br = False
        for k, v in hash.iteritems():
            for x in v:
                if x != k:
                    if x in hash.keys():
                        del hash[x]
                        reduce_hash(hash)
                        br = True
                        break
                    else:
                        br = False
            if br:
                break
    reduce_hash(change_hash)
    # And now we have a perfect change list which we just need to iterate over and change the labels in the image
    hfp.logging('change_hash after change:')
    hfp.logging(change_hash)
    hfp[targetnames[4]] = change_hash

    # Create the merged image
    # hfp.deepcopy_entry('largeobj', targetnames[0])
    hfp.rename_entry('largeobj', targetnames[0])
    for k, v in change_hash.iteritems():
        for x in v:
            if x != k:
                hfp.logging('Setting {} to {}!', x, k)
                hfp.filter_values(x, type='eq', setto=k, keys=targetnames[0])
    if thisparams['return_bordercontact_images']:
        return bordercontacts
    else:
        return None


if __name__ == '__main__':

    resultsfolder = '/mnt/localdata02/jhennies/neuraldata/results/cremi_2016/161110_random_forest_of_paths/'

    yamlfile = resultsfolder + '/parameters.yml'

    ipl = IPL(yaml=yamlfile,
              yamlspec={
                  'path': 'intermedfolder',
                  'filename': 'largeobjfile',
                  'skeys': 'largeobjname'
              },
              recursive_search=True)
    params = ipl.get_params()
    thisparams = params['find_border_contacts']
    ipl.data_from_file(params['intermedfolder'] + params['largeobjmfile'],
                       skeys=params['largeobjmnames'][0],
                       recursive_search=True,
                       integrate=True)
    ipl.startlogger(filename=params['resultfolder'] +
                    'find_border_contacts.log',
                    type='w')

    try:
def run_find_border_contacts(yamlfile, logging=True):

    ipl = IPL(yaml=yamlfile)

    ipl.set_indent(1)

    params = rdict(data=ipl.get_params())
    if logging:
        ipl.startlogger(filename=params['resultfolder'] + 'find_border_contacts.log', type='w', name='FindBorderContacts')
    else:
        ipl.startlogger()

    try:

        # # Copy the script file and the parameters to the scriptsfolder
        # copy(inspect.stack()[0][1], params['scriptsfolder'])
        # copy(yamlfile, params['scriptsfolder'] + 'find_border_contacts.parameters.yml')

        # ipl.logging('\nInitial datastructure: \n\n{}', ipl.datastructure2string(maxdepth=3))

        find_border_contacts(ipl)

        # ipl.logging('\nFinal datastructure: \n\n{}', ipl.datastructure2string(maxdepth=3))

        # ipl.write(filepath=params['intermedfolder'] + params['largeobjfile'])

        ipl.logging('')
        ipl.stoplogger()

    except:

        ipl.errout('Unexpected error')
Example #30
from hdf5_image_processing import Hdf5ImageProcessingLib as IPL
import os
import numpy as np


__author__ = 'jhennies'


if __name__ == '__main__':

    yamlfile = os.path.dirname(os.path.abspath(__file__)) + '/parameters.yml'
    ipl = IPL(
        yaml=yamlfile
    )

    ipl.logging('Parameters: {}', ipl.get_params())
    params = ipl.get_params()

    ipl.data_from_file(filepath=params['datafolder'] + 'cremi.splA.raw_neurons.crop.h5',
                       skeys='raw',
                       tkeys='raw')

    ipl.crop_bounding_rect(np.s_[10:110, 200:712, 200:712], keys='raw')

    ipl.write(filepath=params['datafolder'] + 'cremi.splA.raw_neurons.crop.crop_10-200-200_110-712-712.h5')
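
For a plain array, crop_bounding_rect presumably reduces to ordinary slice indexing (the IPL method is assumed to apply this per key); a self-contained sketch with a stand-in volume:

import numpy as np

raw = np.zeros((120, 720, 720), dtype=np.uint8)  # stand-in volume
cropped = raw[np.s_[10:110, 200:712, 200:712]]
print(cropped.shape)  # (100, 512, 512)
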
            data = IPL(data={k: v})
            data.setlogger(ipl.getlogger())
            merged[kl] = merge_adjacent_objects(data, k, thisparams)

    return merged


if __name__ == '__main__':

    resultsfolder = '/mnt/localdata02/jhennies/neuraldata/results/cremi_2016/161110_random_forest_of_paths/'

    yamlfile = resultsfolder + '/parameters.yml'

    ipl = IPL(yaml=yamlfile,
              yamlspec={
                  'path': 'intermedfolder',
                  'filename': 'largeobjfile'
              })
    params = ipl.get_params()
    ipl.startlogger(filename=params['resultfolder'] +
                    'merge_adjacent_objects.log',
                    type='w')

    try:

        # Copy the script file and the parameters to the scriptsfolder
        copy(inspect.stack()[0][1], params['scriptsfolder'])
        copy(yamlfile,
             params['scriptsfolder'] + 'merge_adjacent_objects.parameters.yml')
        # Write script and parameters to the logfile
        ipl.code2log(inspect.stack()[0][1])
Example #32
def merge_adjacent_objects(
        ipl, key, numberbysize=0, numberbyrandom=0, seed=None,
        targetnames=('largeobjm', 'mergeids_small', 'mergeids_random', 'change_hash'),
        algorithm='standard'
):
    """
    :param ipl:

    ipl.get_params():

        merge_adjacent_objects
            seed
            numberbysize
            numberbyrandom

        largeobjmnames
            - 'largeobj_merged'
            - 'mergeids_small'
            - 'mergeids_random'
            - 'mergeids_all'
            - 'change_hash'

    :param key: the source key for calculation
    """

    # This removes the redundancy from the hash
    def reduce_hash(hash):
        br = False
        for k, v in hash.iteritems():
            for x in v:
                if x != k:
                    if x in hash.keys():
                        del hash[x]
                        reduce_hash(hash)
                        br = True
                        break
                    else:
                        br = False
            if br:
                break

    if algorithm == 'pairs':

        # Get only the relevant labels
        labels = lib.unique(ipl[key])
        ipl.logging('labels = {}', labels)

        # Seed the randomize function
        if seed:
            random.seed(seed)
        else:
            random.seed()

        ipl.astype(np.uint32, keys=key)
        (grag, rag) = graphs.gridRegionAdjacencyGraph(ipl[key], ignoreLabel=0)
        edge_ids = rag.edgeIds()

        merge_ids = []
        used_ids = ()

        # Calculate the merges such that only pairs are found
        for i in xrange(0, numberbyrandom):

            c = 0
            finished = False
            while not finished:

                edge_id = random.choice(edge_ids)
                uid = rag.uId(rag.edgeFromId(edge_id))
                vid = rag.vId(rag.edgeFromId(edge_id))

                c += 1
                if c > 50:
                    ipl.logging('merge_adjacent_objects: Warning: Not finding any more pairs!')
                    finished = True

                elif uid not in used_ids and vid not in used_ids:
                    finished = True
                    used_ids += (uid, vid)
                    merge_ids.append((uid, vid))

        ipl.logging('Label IDs randomly selected for merging: {}', merge_ids)

        # Sort
        merge_ids = [sorted(x) for x in merge_ids]
        merge_ids = sorted(merge_ids)
        ipl.logging('merge_ids = {}', merge_ids)

        # Store this for later use
        ipl[targetnames[2]] = merge_ids
        ipl[targetnames[3]] = merge_ids

        # # Create change hash list
        # change_hash = IPL(data=dict(zip(np.unique(merge_ids), [[x, ] for x in np.unique(merge_ids)])))
        # for i in xrange(0, 3):
        #     prev_change_hash = IPL(data=change_hash)
        #     for x in merge_ids:
        #         ipl.logging('Adding {} and {}', *x)
        #         change_hash[x[0]] += change_hash[x[1]]
        #         change_hash[x[0]] = list(np.unique(change_hash[x[0]]))
        #         change_hash[x[1]] += change_hash[x[0]]
        #         change_hash[x[1]] = list(np.unique(change_hash[x[1]]))
        #
        # reduce_hash(change_hash)
        # # Change the list in the hash to np-arrays for better storage in h5 files
        # for k, v in change_hash.iteritems():
        #     change_hash[k] = np.array(v)
        # # And now we have a perfect change list which we just need to iterate over and change the labels in the image
        # ipl.logging('change_hash after change:')

        us = [x[0] for x in merge_ids]
        change_hash = IPL(data=dict(zip(us, merge_ids)))

        ipl.logging('change_hash: {}', change_hash)
        ipl[targetnames[4]] = change_hash

    elif algorithm == 'standard':

        # Get only the relevant labels
        labels = lib.unique(ipl[key])
        ipl.logging('labels = {}', labels)

        # Seed the randomize function
        random.seed(seed)

        ipl.astype(np.uint32, keys=key)
        (grag, rag) = graphs.gridRegionAdjacencyGraph(ipl[key], ignoreLabel=0)
        edge_ids = rag.edgeIds()
        # ipl.logging('Edge ids: {}', edge_ids)

        # Type 1:
        # Select edges by size (smallest edges)
        ipl.logging('Number of edgeLengths = {}', len(rag.edgeLengths()))
        edgelen_ids = dict(zip(edge_ids, rag.edgeLengths()))
        # ifp.logging('edgelen_ids = {}', edgelen_ids)
        sorted_edgelens = np.sort(rag.edgeLengths())
        #
        smallest_merge_lens = sorted_edgelens[0:numberbysize]
        ipl.logging('Lengths selected for merging: {}', smallest_merge_lens)
        #
        smallest_merge_ids = []
        for x in smallest_merge_lens:
            edge_id = edgelen_ids.keys()[edgelen_ids.values().index(x)]
            smallest_merge_ids.append(edge_id)
            edgelen_ids.pop(edge_id)
        #
        edge_ids = edgelen_ids.keys()
        ipl.logging('Edge IDs selected for merging due to size: {}', smallest_merge_ids)

        # Type 2:
        # Randomly choose edges
        random_merge_ids = random.sample(edge_ids, numberbyrandom)
        ipl.logging('Edge IDs randomly selected for merging: {}', random_merge_ids)

        # Now get the label ids
        smallest_merge_labelids_u = [rag.uId(rag.edgeFromId(x)) for x in smallest_merge_ids]
        smallest_merge_labelids_v = [rag.vId(rag.edgeFromId(x)) for x in smallest_merge_ids]
        smallest_merge_labelids = list(zip(smallest_merge_labelids_u, smallest_merge_labelids_v))
        random_merge_labelids_u = [rag.uId(rag.edgeFromId(x)) for x in random_merge_ids]
        random_merge_labelids_v = [rag.vId(rag.edgeFromId(x)) for x in random_merge_ids]
        random_merge_labelids = list(zip(random_merge_labelids_u, random_merge_labelids_v))
        ipl.logging('Label IDs selected for merging by size: {}', smallest_merge_labelids)
        ipl.logging('Label IDs randomly selected for merging: {}', random_merge_labelids)

        # Concatenate
        all_merge_labelids = smallest_merge_labelids + random_merge_labelids
        # Sort
        ipl.logging('all_merge_labelids = {}', all_merge_labelids)
        all_merge_labelids = [sorted(x) for x in all_merge_labelids]
        all_merge_labelids = sorted(all_merge_labelids)
        ipl.logging('all_merge_labelids = {}', all_merge_labelids)

        # Store this for later use
        ipl[targetnames[1]] = smallest_merge_labelids
        ipl[targetnames[2]] = random_merge_labelids
        ipl[targetnames[3]] = all_merge_labelids

        # Create change hash list
        change_hash = IPL(data=dict(zip(np.unique(all_merge_labelids), [[x,] for x in np.unique(all_merge_labelids)])))
        for i in xrange(0, 3):
            prev_change_hash = IPL(data=change_hash)
            for x in all_merge_labelids:
                ipl.logging('Adding {} and {}', *x)
                change_hash[x[0]] += change_hash[x[1]]
                change_hash[x[0]] = list(np.unique(change_hash[x[0]]))
                change_hash[x[1]] += change_hash[x[0]]
                change_hash[x[1]] = list(np.unique(change_hash[x[1]]))
        # This removes the redundancy from the hash
        reduce_hash(change_hash)
        # Change the list in the hash to np-arrays for better storage in h5 files
        for k, v in change_hash.iteritems():
            change_hash[k] = np.array(v)
        # And now we have a perfect change list which we just need to iterate over and change the labels in the image
        ipl.logging('change_hash after change:')
        ipl.logging(change_hash)
        ipl[targetnames[4]] = change_hash

    # Create the merged image
    # ipl.deepcopy_entry('largeobj', targetnames[0])
    ipl.rename_entry(key, targetnames[0])
    for k, v in change_hash.iteritems():
        for x in v:
            if x != k:
                ipl.logging('Setting {} to {}!', x, k)
                ipl.filter_values(x, type='eq', setto=k, keys=targetnames[0])

    return ipl
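
The 'pairs' branch above draws random edges and rejects any edge touching an already-used label, so each object takes part in at most one merge. A stand-alone sketch of that rejection loop over an invented edge list:

import random

edges = [(1, 2), (2, 3), (3, 4), (5, 6), (6, 7)]
random.seed(0)

merge_ids, used = [], set()
for _ in range(2):                 # e.g. numberbyrandom = 2
    for attempt in range(50):      # give up after 50 tries, like the original
        u, v = random.choice(edges)
        if u not in used and v not in used:
            used.update((u, v))
            merge_ids.append((u, v))
            break

print(sorted(sorted(x) for x in merge_ids))
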
Example #33
from hdf5_image_processing import Hdf5ImageProcessingLib as IPL
import processing_lib as lib

# Sample A probs
probs_a = IPL(
    filepath='/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splA.probs_cantorV1.h5'
)

probs_a.logging('Probs A datastructure\n---\n{}', probs_a.datastructure2string())

probs_a.anytask(lib.swapaxes, 0, 2)

probs_a.write('/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splA.train.probs.crop.h5')

reskeys = ('0', '1')
split_probs_a = IPL()
split_probs_a['z'] = probs_a.anytask(lib.split, 2, axis=0, result_keys=reskeys, return_only=True, rtrntype=IPL)
split_probs_a['y'] = probs_a.anytask(lib.split, 2, axis=1, result_keys=reskeys, return_only=True, rtrntype=IPL)
split_probs_a['x'] = probs_a.anytask(lib.split, 2, axis=2, result_keys=reskeys, return_only=True, rtrntype=IPL)

split_probs_a = split_probs_a.switch_levels(1, 2)
probs_a.logging('Split sample A datastructure\n---\n{}', split_probs_a.datastructure2string())

split_probs_a.write('/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splA.train.probs.crop.split_xyz.h5')
    for d, k, v, kl in ipl.data_iterator(yield_short_kl=True):

        if k == params['locmaxbordernames'][2]:
            ipl[kl].setlogger(ipl.getlogger())
            ipl[kl] = localmax_on_disttransf(ipl[kl], (params['locmaxbordernames'][2], params['locmaxbordernames'][3]), thisparams)


if __name__ == '__main__':

    resultsfolder = '/mnt/localdata02/jhennies/neuraldata/results/cremi_2016/161110_random_forest_of_paths/'

    yamlfile = resultsfolder + '/parameters.yml'

    ipl = IPL(
        yaml=yamlfile,
        yamlspec={'path': 'intermedfolder', 'filename': 'locmaxborderfile', 'skeys': {'locmaxbordernames': (2, 3)}},
        recursive_search=True
    )
    params = ipl.get_params()
    thisparams = params['localmax_on_disttransf']
    ipl.startlogger(filename=params['resultfolder'] + 'localmax_on_disttransf.log', type='w')

    try:

        # Copy the script file and the parameters to the scriptsfolder
        copy(inspect.stack()[0][1], params['scriptsfolder'])
        copy(yamlfile, params['scriptsfolder'] + 'localmax_on_disttransf.parameters.yml')
        # Write script and parameters to the logfile
        ipl.code2log(inspect.stack()[0][1])
        ipl.logging('')
        ipl.yaml2log()
Example #35

if __name__ == "__main__":

    # cremi = IPL(filepath='/mnt/localdata02/jhennies/neuraldata/cremi_2016/sample_B_20160501.hdf')
    #
    # cremi.logging('Datastructure:\n---\n{}', cremi.datastructure2string())
    #
    # images = IPL(data={
    #     'raw': cremi['volumes', 'raw'],
    #     'neuron_ids': cremi['volumes', 'labels', 'neuron_ids']
    # })
    #
    # images.logging('Datastructure:\n---\n{}', images.datastructure2string())
    #
    # images.write('/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splB.raw_neurons.crop.h5')



    cremi = IPL(filepath='/mnt/localdata02/jhennies/neuraldata/cremi_2016/sample_C_20160501.hdf')

    cremi.logging('Datastructure:\n---\n{}', cremi.datastructure2string())

    images = IPL(data={
        'raw': cremi['volumes', 'raw'],
        'neuron_ids': cremi['volumes', 'labels', 'neuron_ids']
    })

    images.logging('Datastructure:\n---\n{}', images.datastructure2string())

    images.write('/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splC.raw_neurons.crop.h5')
        for fk, fv in feature_images.iteritems():

            features["false", k, fk] = get_features(
                v, fv, thisparams["features"], thisparams["max_paths_per_label"], hfp=hfp
            )

    # for i, k, ks, vs in hfp['paths_true'].simultaneous_iterator():
    #     pass


if __name__ == "__main__":

    yamlfile = os.path.dirname(os.path.abspath(__file__)) + "/parameters.yml"
    hfp = IPL(yaml=yamlfile, yamlspec={"path": "intermedfolder", "filename": "pathstruefile"}, castkey=None)
    # hfp.logging('datastructure:\n---\n{}', hfp.datastructure2string())
    params = hfp.get_params()

    hfp["true", "border"] = IPL(data=hfp["largeobj", "border_locmax", "path"])
    hfp["true", "locmax"] = IPL(data=hfp["largeobj", "locmax", "path"])
    hfp.pop("largeobj")

    hfp.data_from_file(filepath=params["intermedfolder"] + params["pathsfalsefile"])

    hfp["false", "border"] = IPL(data=hfp["largeobjm", "border_locmax_m", "path"])
    hfp["false", "locmax"] = IPL(data=hfp["largeobjm", "locmaxm", "path"])
    hfp.pop("largeobjm")

    hfp.pop("pathsim")
    hfp.pop("overlay")
Example #37
        v['1']['false'] = make_feature_array(v['1']['false'])

        result[k, '0'] = random_forest(v['0'], v['1'])
        result[k, '1'] = random_forest(v['1'], v['0'])

    return result


if __name__ == '__main__':

    resultsfolder = '/mnt/localdata02/jhennies/neuraldata/results/cremi_2016/161110_random_forest_of_paths/'

    yamlfile = resultsfolder + '/parameters.yml'

    features = IPL(
        yaml=yamlfile,
        yamlspec={'path': 'intermedfolder', 'filename': 'featurefile'}
    )
    params = features.get_params()
    thisparams = params['random_forest']
    features.startlogger(filename=params['resultfolder'] + 'random_forest.log', type='w')

    try:

        # Copy the script file and the parameters to the scriptsfolder
        copy(inspect.stack()[0][1], params['scriptsfolder'])
        copy(yamlfile, params['scriptsfolder'] + 'random_forest.parameters.yml')
        # Write script and parameters to the logfile
        features.code2log(inspect.stack()[0][1])
        features.logging('')
        features.yaml2log()
        features.logging('')
        if k == params['labelsname']:

            ipl.logging('===============================\nWorking on image: {}', kl + [k])

            ipl[kl].setlogger(ipl.getlogger())
            ipl[kl] = accumulate_small_objects(ipl[kl], k, thisparams)


if __name__ == '__main__':

    resultsfolder = '/mnt/localdata02/jhennies/neuraldata/results/cremi_2016/161110_random_forest_of_paths/'

    yamlfile = resultsfolder + '/parameters.yml'

    ipl = IPL(
        yaml=yamlfile,
        yamlspec={'path': 'datafolder', 'filename': 'labelsfile'}
    )
    params = ipl.get_params()
    ipl.startlogger(filename=params['resultfolder']+'remove_small_objects.log', type='a')

    try:

        # Create folder for scripts
        if not os.path.exists(params['scriptsfolder']):
            os.makedirs(params['scriptsfolder'])
        else:
            if params['overwriteresults']:
                ipl.logging('remove_small_objects: Warning: Scriptsfolder already exists and content will be overwritten...\n')
            else:
                raise IOError('remove_small_objects: Error: Scriptsfolder already exists!')
Example #39
    # sample_a.logging('Sample A datastructure\n---\n{}', sample_a.datastructure2string())
    #
    # reskeys = ('0', '1')
    # split_sample_a = IPL()
    # split_sample_a['z'] = sample_a.anytask(lib.split, 2, axis=0, result_keys=reskeys, return_only=True)
    # split_sample_a['y'] = sample_a.anytask(lib.split, 2, axis=1, result_keys=reskeys, return_only=True)
    # split_sample_a['x'] = sample_a.anytask(lib.split, 2, axis=2, result_keys=reskeys, return_only=True)
    #
    # split_sample_a = split_sample_a.switch_levels(1, 2)
    # sample_a.logging('Split sample A datastructure\n---\n{}', split_sample_a.datastructure2string())
    #
    # split_sample_a.write('/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splA.raw_neurons.crop.split_xyz.h5')

    # Sample B
    sample = IPL(
        filepath='/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splB.raw_neurons.crop.h5'
    )

    sample.logging('Sample B datastructure\n---\n{}', sample.datastructure2string())

    reskeys = ('0', '1')
    split_sample = IPL()
    split_sample['z'] = sample.anytask(lib.split, 2, axis=0, result_keys=reskeys, return_only=True)
    split_sample['y'] = sample.anytask(lib.split, 2, axis=1, result_keys=reskeys, return_only=True)
    split_sample['x'] = sample.anytask(lib.split, 2, axis=2, result_keys=reskeys, return_only=True)

    split_sample = split_sample.switch_levels(1, 2)
    sample.logging('Split sample B datastructure\n---\n{}', split_sample.datastructure2string())

    split_sample.write('/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splB.raw_neurons.crop.split_xyz.h5')
        if k == params['largeobjname']:

            data = IPL(data={k: v})
            data.setlogger(ipl.getlogger())
            merged[kl] = merge_adjacent_objects(data, k, thisparams)

    return merged

if __name__ == '__main__':

    resultsfolder = '/mnt/localdata02/jhennies/neuraldata/results/cremi_2016/161110_random_forest_of_paths/'

    yamlfile = resultsfolder + '/parameters.yml'

    ipl = IPL(
        yaml=yamlfile,
        yamlspec={'path': 'intermedfolder', 'filename': 'largeobjfile'}
    )
    params = ipl.get_params()
    ipl.startlogger(filename=params['resultfolder'] + 'merge_adjacent_objects.log', type='w')

    try:

        # Copy the script file and the parameters to the scriptsfolder
        copy(inspect.stack()[0][1], params['scriptsfolder'])
        copy(yamlfile, params['scriptsfolder'] + 'merge_adjacent_objects.parameters.yml')
        # Write script and parameters to the logfile
        ipl.code2log(inspect.stack()[0][1])
        ipl.logging('')
        ipl.yaml2log()
        ipl.logging('')
Example #41
from hdf5_image_processing import Hdf5ImageProcessingLib as IPL
import os
import matplotlib.pylab as lab
import processing_lib as lib

# TODO: Visualize the distance transform along paths of true and false merges
# TODO: Also plot intensity values of the raw data and the probability map
# TODO: Do not forget to save the results for the thesis!
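
# A possible starting point for the TODOs above (a minimal sketch; the helper
# is hypothetical and assumes a path given as an (N, 3) integer array of zyx
# coordinates into a 3D volume such as the distance transform):
def plot_along_path(image, path, label=None):
    # Read the image value at every voxel the path visits, in path order
    values = image[path[:, 0], path[:, 1], path[:, 2]]
    lab.plot(values, label=label)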

__author__ = 'jhennies'

if __name__ == '__main__':

    yamlfile = os.path.dirname(os.path.abspath(__file__)) + '/parameters.yml'
    hfp = IPL(yaml=yamlfile,
              yamlspec={
                  'path': 'intermedfolder',
                  'filename': 'pathstruefile'
              },
              tkeys='true',
              castkey=None)

    params = hfp.get_params()

    hfp.data_from_file(filepath=params['intermedfolder'] +
                       params['pathsfalsefile'],
                       tkeys='false',
                       castkey=None)

    hfp.data_from_file(filepath=params['intermedfolder'] +
                       params['locmaxfile'],
                       skeys=('disttransf', 'disttransfm'),
                       tkeys=('disttransf', 'disttransfm'))
                                       ignoreLabel=0,
                                       reverse_order=True,
                                       reciprocal=False,
                                       keys=('paths_true', 'paths_false'),
                                       indict=disttransf_images)


if __name__ == '__main__':

    yamlfile = os.path.dirname(os.path.abspath(__file__)) + '/parameters.yml'

    # TODO: Insert code here
    hfp = Hdf5ImageProcessingLib(yaml=yamlfile,
                                 yamlspec={
                                     'path': 'intermedfolder',
                                     'filename': 'pathstruefile'
                                 },
                                 tkeys='true',
                                 castkey=None)
    params = hfp.get_params()
    hfp.logging('params = {}', params)
    hfp.data_from_file(filepath=params['intermedfolder'] +
                       params['pathsfalsefile'],
                       tkeys='false',
                       castkey=None)
    hfp.startlogger(filename=params['intermedfolder'] +
                    'features_of_paths.log',
                    type='a')

    try:
                '===============================\nWorking on image: {}',
                kl + [k])

            ipl[kl].setlogger(ipl.getlogger())
            ipl[kl] = accumulate_small_objects(ipl[kl], k, thisparams)


if __name__ == '__main__':

    resultsfolder = '/mnt/localdata02/jhennies/neuraldata/results/cremi_2016/161110_random_forest_of_paths/'

    yamlfile = resultsfolder + '/parameters.yml'

    ipl = IPL(yaml=yamlfile,
              yamlspec={
                  'path': 'datafolder',
                  'filename': 'labelsfile'
              })
    params = ipl.get_params()
    ipl.startlogger(filename=params['resultfolder'] +
                    'remove_small_objects.log',
                    type='a')

    try:

        # Create folder for scripts
        if not os.path.exists(params['scriptsfolder']):
            os.makedirs(params['scriptsfolder'])
        else:
            if params['overwriteresults']:
                ipl.logging(
                ipl[kl], disttransf_images[kl], feature_images[kl],
                thisparams
            )

    return features


if __name__ == '__main__':

    resultsfolder = '/mnt/localdata02/jhennies/neuraldata/results/cremi_2016/161110_random_forest_of_paths/'

    yamlfile = resultsfolder + '/parameters.yml'

    ipl = IPL(
        yaml=yamlfile,
        yamlspec={'path': 'intermedfolder', 'filename': 'pathstruefile'},
        skeys='path', recursive_search=True
    )
    # ipl.logging('datastructure:\n---\n{}', ipl.datastructure2string())
    params = ipl.get_params()
    ipl.rename_layer('largeobj', 'true')

    # ipl['true', 'border'] = IPL(data=ipl['largeobj', 'border_locmax', 'path'])
    # ipl['true', 'locmax'] = IPL(data=ipl['largeobj', 'locmax', 'path'])
    # ipl.pop('largeobj')

    ipl.data_from_file(filepath=params['intermedfolder'] + params['pathsfalsefile'],
                       skeys='path', recursive_search=True, integrate=True)
    ipl.rename_layer('largeobjm', 'false')
    ipl.remove_layer('path')
Example #45
def run_paths_of_merges(yamlfile, logging=True):

    ipl = IPL(yaml=yamlfile)

    ipl.set_indent(1)

    params = rdict(data=ipl.get_params())
    if logging:
        ipl.startlogger(filename=params['resultfolder'] +
                        'paths_of_merges.log',
                        type='w',
                        name='PathsOfMerges')
    else:
        ipl.startlogger()

    try:

        # # Copy the script file and the parameters to the scriptsfolder
        # copy(inspect.stack()[0][1], params['scriptsfolder'])
        # copy(yamlfile, params['scriptsfolder'] + 'paths_of_merges.parameters.yml')

        # ipl.logging('\nInitial datastructure: \n\n{}', ipl.datastructure2string(maxdepth=3))

        paths_of_merges(ipl, params['debug'])

        # ipl.logging('\nFinal datastructure: \n\n{}', ipl.datastructure2string(maxdepth=3))

        # ipl.write(filepath=params['intermedfolder'] + params['largeobjfile'])

        ipl.logging('')
        ipl.stoplogger()

    except:

        ipl.errout('Unexpected error')
def load_images(ipl):
    """
    These images are loaded:
    paths_true (paths within single label objects)
    paths_false (paths of merged objects which cross the merging site)
    featureims_true
    featureims_false
    :param ipl:
    :return:
    """
    paths_true = IPL()
    paths_false = IPL()
    featureims_true = IPL()
    featureims_false = IPL()

    params = ipl.get_params()

    ipl.logging('Loading true paths ...')
    # Paths within labels (true paths)
    paths_true.data_from_file(
        filepath=params['intermedfolder'] + params['pathstruefile'],
        skeys='path',
        recursive_search=True, nodata=True
    )

    ipl.logging('Loading false paths ...')
    # Paths of merges (false paths)
    paths_false.data_from_file(
        filepath=params['intermedfolder'] + params['pathsfalsefile'],
        skeys='path',
        recursive_search=True, nodata=True
    )

    ipl.logging('Loading features for true paths ...')
    # Load features for true paths
    featureims_true.data_from_file(
        filepath=params['intermedfolder'] + params['featureimsfile'],
        nodata=True
    )
    featureims_true.delete_items(params['largeobjmnames'][0])

    ipl.logging('Loading features for false paths ...')
    # Load features for false paths
    featureims_false.data_from_file(
        filepath=params['intermedfolder'] + params['featureimsfile'],
        nodata=True
    )
    featureims_false.delete_items(params['largeobjname'])

    return (paths_true, paths_false, featureims_true, featureims_false)
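
# Sketch of how the loader above is consumed downstream (illustrative):
#   paths_true, paths_false, featureims_true, featureims_false = load_images(ipl)
# Note the asymmetry: params['largeobjmnames'][0] is removed from the 'true'
# feature images and params['largeobjname'] from the 'false' ones, so each
# path collection is paired only with the feature images relevant to it.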
    # hfp.deepcopy_entry('largeobj', targetnames[0])
    hfp.rename_entry('largeobj', targetnames[0])
    for k, v in change_hash.iteritems():
        for x in v:
            if x != k:
                hfp.logging('Setting {} to {}!', x, k)
                hfp.filter_values(x, type='eq', setto=k, keys=targetnames[0])
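
# In plain numpy, the relabelling above is simple value replacement on the
# label image (a minimal sketch, assuming change_hash maps each kept label k
# to the collection v of labels merged into it):
def apply_change_hash(labels, change_hash):
    out = labels.copy()
    for k, v in change_hash.iteritems():
        for x in v:
            if x != k:
                out[out == x] = k  # counterpart of filter_values(x, 'eq', setto=k)
    return out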


if __name__ == '__main__':

    yamlfile = os.path.dirname(os.path.abspath(__file__)) + '/parameters.yml'

    hfp = IPL(
        yaml=yamlfile,
        yamlspec={'path': 'datafolder', 'filename': 'largeobjfile', 'skeys': 'largeobjname'},
        tkeys='largeobj',
        castkey=None
    )
    params = hfp.get_params()
    hfp.startlogger(filename=params['resultfolder'] + 'merge_adjacent_objects.log', type='w')

    try:

        # Copy the script file and the parameters to the scriptsfolder
        copy(inspect.stack()[0][1], params['scriptsfolder'])
        copy(yamlfile, params['scriptsfolder'] + 'merge_adjacent_objects.parameters.yml')
        # Write script and parameters to the logfile
        hfp.code2log(inspect.stack()[0][1])
        hfp.logging('')
        hfp.yaml2log()
        hfp.logging('')
Example #48

if __name__ == "__main__":

    infiles = [
        '/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splA.train.probs.crop.h5',
        '/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splA.train.raw_neurons.crop.h5'
    ]
    outfiles = [
        '/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splA.train.probs.crop.crop_x10_110_y200_712_z200_712.split_xyz.h5',
        '/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splA.train.raw_neurons.crop.crop_x10_110_y200_712_z200_712.split_xyz.h5'
    ]

    for i in xrange(0, len(infiles)):

        ipl = IPL(filepath=infiles[i])
        ipl.logging('Datastructure\n---\n{}', ipl.datastructure2string())

        ipl.crop_bounding_rect(bounds=np.s_[10:110, 200:712, 200:712])

        def shape(image):
            return image.shape

        print ipl.datastructure2string(function=shape)

        ipl_split = split_in_xyz(ipl)

        ipl_split.write(filepath=outfiles[i])
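
# A plain-numpy sketch of the halving performed by split_in_xyz (assuming
# lib.split behaves like numpy's split into two parts; array_split is used
# here so odd extents also work):
def split_xyz_sketch(volume):
    return {
        'z': np.array_split(volume, 2, axis=0),
        'y': np.array_split(volume, 2, axis=1),
        'x': np.array_split(volume, 2, axis=2),
    }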

    # # Sample A
    # sample_a = IPL(
Example #49
from hdf5_image_processing import Hdf5ImageProcessingLib as IPL
import os
import numpy as np

__author__ = 'jhennies'

if __name__ == '__main__':

    yamlfile = os.path.dirname(os.path.abspath(__file__)) + '/parameters.yml'
    ipl = IPL(yaml=yamlfile)

    params = ipl.get_params()
    ipl.logging('Parameters: {}', params)

    ipl.data_from_file(filepath=params['datafolder'] +
                       'cremi.splA.raw_neurons.crop.h5',
                       skeys='raw',
                       tkeys='raw')

    ipl.crop_bounding_rect(np.s_[10:110, 200:712, 200:712], keys='raw')

    ipl.write(filepath=params['datafolder'] +
              'cremi.splA.raw_neurons.crop.crop_10-200-200_110-712-712.h5')
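
    # For reference, the crop above is plain slicing with an np.s_ index; a
    # minimal h5py-only sketch of the same read (file and dataset names taken
    # from the calls above):
    import h5py

    with h5py.File(params['datafolder'] + 'cremi.splA.raw_neurons.crop.h5', 'r') as f:
        raw_crop = f['raw'][np.s_[10:110, 200:712, 200:712]]  # -> shape (100, 512, 512)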
Example #50
                                        thisparams['features'],
                                        thisparams['max_paths_per_label'],
                                        hfp=hfp)

    # for i, k, ks, vs in hfp['paths_true'].simultaneous_iterator():
    #     pass


if __name__ == '__main__':

    yamlfile = os.path.dirname(os.path.abspath(__file__)) + '/parameters.yml'

    hfp = IPL(yaml=yamlfile,
              yamlspec={
                  'path': 'intermedfolder',
                  'filename': 'pathstruefile'
              },
              castkey=None)
    # hfp.logging('datastructure:\n---\n{}', hfp.datastructure2string())
    params = hfp.get_params()

    hfp['true', 'border'] = IPL(data=hfp['largeobj', 'border_locmax', 'path'])
    hfp['true', 'locmax'] = IPL(data=hfp['largeobj', 'locmax', 'path'])
    hfp.pop('largeobj')

    hfp.data_from_file(filepath=params['intermedfolder'] +
                       params['pathsfalsefile'])

    hfp['false', 'border'] = IPL(data=hfp['largeobjm', 'border_locmax_m',
                                          'path'])
    hfp['false', 'locmax'] = IPL(data=hfp['largeobjm', 'locmaxm', 'path'])
Example #51
        result[k, '0'] = random_forest(v['0'], v['1'])
        result[k, '1'] = random_forest(v['1'], v['0'])

    return result
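
# A minimal sketch of the two-fold scheme above, assuming random_forest trains
# on one half of the data and classifies the other; the sklearn-based helper
# below is illustrative, not the module's actual implementation:
from sklearn.ensemble import RandomForestClassifier

def random_forest_sketch(train, test):
    # train/test: dicts with 'features' (n x d array) and 'labels' (n,)
    rf = RandomForestClassifier(n_estimators=100)
    rf.fit(train['features'], train['labels'])
    return rf.predict(test['features'])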


if __name__ == '__main__':

    resultsfolder = '/mnt/localdata02/jhennies/neuraldata/results/cremi_2016/161110_random_forest_of_paths/'

    yamlfile = resultsfolder + '/parameters.yml'

    features = IPL(yaml=yamlfile,
                   yamlspec={
                       'path': 'intermedfolder',
                       'filename': 'featurefile'
                   })
    params = features.get_params()
    thisparams = params['random_forest']
    features.startlogger(filename=params['resultfolder'] + 'random_forest.log',
                         type='w')

    try:

        # Copy the script file and the parameters to the scriptsfolder
        copy(inspect.stack()[0][1], params['scriptsfolder'])
        copy(yamlfile,
             params['scriptsfolder'] + 'random_forest.parameters.yml')
        # Write script and parameters to the logfile
        features.code2log(inspect.stack()[0][1])
Example #52
    # sample_a.logging('Sample A datastructure\n---\n{}', sample_a.datastructure2string())
    #
    # reskeys = ('0', '1')
    # split_sample_a = IPL()
    # split_sample_a['z'] = sample_a.anytask(lib.split, 2, axis=0, result_keys=reskeys, return_only=True)
    # split_sample_a['y'] = sample_a.anytask(lib.split, 2, axis=1, result_keys=reskeys, return_only=True)
    # split_sample_a['x'] = sample_a.anytask(lib.split, 2, axis=2, result_keys=reskeys, return_only=True)
    #
    # split_sample_a = split_sample_a.switch_levels(1, 2)
    # sample_a.logging('Split sample A datastructure\n---\n{}', split_sample_a.datastructure2string())
    #
    # split_sample_a.write('/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splA.raw_neurons.crop.split_xyz.h5')

    # Sample B
    sample = IPL(
        filepath=
        '/mnt/localdata02/jhennies/neuraldata/cremi_2016/cremi.splB.raw_neurons.crop.h5'
    )

    sample.logging('Sample B datastructure\n---\n{}',
                   sample.datastructure2string())

    reskeys = ('0', '1')
    split_sample = IPL()
    split_sample['z'] = sample.anytask(lib.split,
                                       2,
                                       axis=0,
                                       result_keys=reskeys,
                                       return_only=True)
    split_sample['y'] = sample.anytask(lib.split,
                                       2,
                                       axis=1,
def run_remove_small_objects(yamlfile):

    ipl = IPL(
        yaml=yamlfile,
        yamlspec={'path': 'datafolder', 'filename': 'labelsfile', 'skeys': 'labelsname'},
        recursive_search=True,
        nodata=True
    )

    # Set indentation of the logging
    ipl.set_indent(1)

    params = ipl.get_params()
    ipl.startlogger(filename=params['resultfolder'] + 'remove_small_objects.log', type='w', name='RemoveSmallObjects')

    try:

        # # Copy the script file and the parameters to the scriptsfolder
        # copy(inspect.stack()[0][1], params['scriptsfolder'])
        # copy(yamlfile, params['scriptsfolder'] + 'remove_small_objects.parameters.yml')

        ipl.logging('\nipl datastructure: \n\n{}', ipl.datastructure2string(maxdepth=3))

        remove_small_objects(ipl)

        ipl.logging('\nFinal datastructure: \n\n{}', ipl.datastructure2string(maxdepth=3))

        # ipl.write(filepath=params['intermedfolder'] + params['largeobjfile'])

        ipl.logging('')
        ipl.stoplogger()

    except:

        ipl.errout('Unexpected error')
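
# What the size filtering amounts to on a plain label volume (a minimal
# sketch; the actual criterion and bookkeeping live in accumulate_small_objects,
# and min_size is a hypothetical voxel-count threshold):
import numpy as np

def remove_small_labels(labels, min_size):
    ids, counts = np.unique(labels, return_counts=True)
    small = ids[counts < min_size]
    out = labels.copy()
    out[np.in1d(out.ravel(), small).reshape(out.shape)] = 0  # set to background
    return out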
#
#         ifp.errout('Unexpected error', traceback)
#
#     try:
#         hfp.write(filepath=params['intermedfolder'] + params['pathsfalsefile'])
#     except:
#         pass


if __name__ == '__main__':

    yamlfile = os.path.dirname(os.path.abspath(__file__)) + '/parameters.yml'

    hfp = IPL(
        yaml=yamlfile,
        yamlspec={'path': 'intermedfolder', 'filename': 'locmaxborderfile', 'skeys': {'locmaxbordernames': (1, 3)}},
        tkeys=('border_locmax_m', 'disttransfm'),
        castkey=None
    )
    params = hfp.get_params()
    thisparams = params['paths_of_partners']
    hfp.startlogger(filename=params['resultfolder'] + 'paths_of_partners.log', type='w')
    hfp.data_from_file(params['intermedfolder'] + params['largeobjfile'],
                       skeys=params['largeobjname'],
                       tkeys='largeobj')
    hfp.data_from_file(params['intermedfolder'] + params['largeobjmfile'],
                       skeys=(params['largeobjmnames'][0], params['largeobjmnames'][4]),
                       tkeys=('largeobjm', 'change_hash'))
    hfp.data_from_file(params['intermedfolder'] + params['locmaxfile'],
                       skeys=params['locmaxnames'][0],
                       tkeys='locmaxm')