# NOTE on shared dependencies of the examples below (Python 2 code:
# iteritems, xrange, print statements). IPL and lib are project-specific
# modules assumed here: IPL behaves like a nested dict of images with
# logging support, and lib provides array helpers such as swapaxes and
# positions2value.
import random

import numpy as np
import vigra
from vigra import graphs

import matplotlib.pyplot as plt
import pylab as lab  # 'lab' is presumably pylab (used for savefig below)


def get_features(paths, featureimage, featurelist, max_paths_per_label, hfp=None):

    newfeats = IPL()
    keylist = [str(x) for x in range(max_paths_per_label)]
    for i, keys, vals in paths.simultaneous_iterator(max_count_per_item=max_paths_per_label, keylist=keylist):

        if hfp is not None:
            hfp.logging("Working in iteration = {}", i)

        # Create a working image
        image = np.zeros(featureimage.shape, dtype=np.uint32)

        # And fill it with one path per label object
        c = 1
        for curk, curv in dict(zip(keys, vals)).iteritems():
            curv = lib.swapaxes(curv, 0, 1)
            lib.positions2value(image, curv, c)
            c += 1

        # Extract the region features of the working image
        newnewfeats = IPL(
            data=vigra.analysis.extractRegionFeatures(featureimage, image, ignoreLabel=0, features=featurelist)
        )

        for k, v in newnewfeats.iteritems():
            # Drop the entry belonging to the background label 0
            newnewfeats[k] = newnewfeats[k][1:]
            if k in newfeats:
                try:
                    newfeats[k] = np.concatenate((newfeats[k], newnewfeats[k]))
                except ValueError:
                    # Feature shapes can differ between iterations; skip those
                    pass
            else:
                newfeats[k] = newnewfeats[k]

    return newfeats
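
# The core of get_features is vigra's region-feature accumulator. Below is a
# minimal, self-contained sketch of that underlying call (toy shapes and
# illustrative feature names; everything apart from the vigra API itself is
# made up for the demo):
def _demo_extract_region_features():
    # Toy feature image and a label image with two hand-drawn "paths"
    featureimage = np.random.rand(20, 20, 20).astype(np.float32)
    image = np.zeros((20, 20, 20), dtype=np.uint32)
    image[5, 5, 5:15] = 1    # path of label 1
    image[10, 10, 2:18] = 2  # path of label 2

    # Per-label statistics of featureimage within each path; label 0 is
    # ignored but still occupies entry 0 of every result array
    feats = vigra.analysis.extractRegionFeatures(
        featureimage, image, ignoreLabel=0,
        features=['Count', 'Mean', 'Variance'])
    print(feats['Mean'][1:])  # hence the [1:] slicing above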
Example 2
def get_features(paths, featureimages, featurelist, max_paths_per_label, ipl=None):

    newfeats = IPL()

    keylist = [str(x) for x in range(max_paths_per_label)]

    # Iterate over all paths, yielding a list of one path per label object until no paths are left
    for i, keys, vals in paths.simultaneous_iterator(
            max_count_per_item=max_paths_per_label,
            keylist=keylist):

        if ipl is not None:
            ipl.logging('Working in iteration = {}', i)
            ipl.logging('Keys: {}', keys)

        if not keys:
            continue

        # Create a working image
        image = np.zeros(np.array(featureimages.yield_an_item()).shape, dtype=np.uint32)
        # And fill it with one path per label object
        c = 1
        for curk, curv in (dict(zip(keys, vals))).iteritems():
            curv = np.array(curv)
            curv = lib.swapaxes(curv, 0, 1)
            lib.positions2value(image, curv, c)
            c += 1

        # TODO: If this loop iterated over the parameter list it would be more broadly applicable
        for d, k, v, kl in featureimages.data_iterator():

            if type(v) is not IPL:

                # Extract the region features of the working image
                # TODO: Extract feature 'Count' manually due to anisotropy
                newnewfeats = IPL(
                    data=vigra.analysis.extractRegionFeatures(
                        np.array(v).astype(np.float32),
                        image, ignoreLabel=0,
                        features=featurelist
                    )
                )
                # Append to the recently computed list of features
                for nk, nv in newnewfeats.iteritems():
                    nv = nv[1:]
                    if newfeats.inkeys(kl+[nk]):
                        try:
                            newfeats[kl + [nk]] = np.concatenate((newfeats[kl + [nk]], nv))
                        except ValueError:
                            pass
                    else:
                        newfeats[kl + [nk]] = nv

    return newfeats
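
# For orientation: this variant nests its results by feature image, storing
# each feature under kl + [feature_name]. A minimal sketch of that layout,
# with plain dicts standing in for IPL (all keys made up):
def _demo_nested_feature_layout():
    def set_nested(d, keylist, value):
        # Walk/create nested dicts along keylist, mimicking newfeats[kl + [nk]] = nv
        for k in keylist[:-1]:
            d = d.setdefault(k, {})
        d[keylist[-1]] = value

    newfeats = {}
    set_nested(newfeats, ['raw', 'Mean'], np.array([0.3, 0.7]))
    set_nested(newfeats, ['probs', 'Mean'], np.array([0.1, 0.9]))
    print(newfeats)  # {'raw': {'Mean': ...}, 'probs': {'Mean': ...}}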
def get_features(paths, featureimage, featurelist, max_paths_per_label, ipl=None):

    newfeats = IPL()

    # TODO: Selection of a limited amount of paths should be random
    keylist = [str(x) for x in range(max_paths_per_label)]

    # Iterate over all paths, yielding a list of one path per label object until no paths are left
    for i, keys, vals in paths.simultaneous_iterator(
            max_count_per_item=max_paths_per_label,
            keylist=keylist):

        if ipl is not None:
            ipl.logging('Working in iteration = {}', i)

        # Create a working image
        image = np.zeros(featureimage.shape, dtype=np.uint32)
        # And fill it with one path per label object
        c = 1
        for curk, curv in (dict(zip(keys, vals))).iteritems():
            curv = lib.swapaxes(curv, 0, 1)
            lib.positions2value(image, curv, c)
            c += 1

        # Extract the region features of the working image
        newnewfeats = IPL(
            data=vigra.analysis.extractRegionFeatures(
                featureimage,
                image, ignoreLabel=0,
                features=featurelist
            )
        )

        for k, v in newnewfeats.iteritems():
            newnewfeats[k] = newnewfeats[k][1:]
            if k in newfeats:
                try:
                    newfeats[k] = np.concatenate((newfeats[k], newnewfeats[k]))
                except ValueError:
                    pass
            else:
                newfeats[k] = newnewfeats[k]

    return newfeats
Example 4
def get_features(paths,
                 featureimage,
                 featurelist,
                 max_paths_per_label,
                 hfp=None):

    newfeats = IPL()
    keylist = [str(x) for x in range(max_paths_per_label)]
    for i, keys, vals in paths.simultaneous_iterator(
            max_count_per_item=max_paths_per_label, keylist=keylist):

        if hfp is not None:
            hfp.logging('Working in iteration = {}', i)

        image = np.zeros(featureimage.shape, dtype=np.uint32)

        c = 1
        for curk, curv in (dict(zip(keys, vals))).iteritems():

            curv = lib.swapaxes(curv, 0, 1)
            lib.positions2value(image, curv, c)
            c += 1

        newnewfeats = IPL(data=vigra.analysis.extractRegionFeatures(
            featureimage, image, ignoreLabel=0, features=featurelist))

        for k, v in newnewfeats.iteritems():
            newnewfeats[k] = newnewfeats[k][1:]
            if k in newfeats:
                try:
                    newfeats[k] = np.concatenate((newfeats[k], newnewfeats[k]))
                except ValueError:
                    pass
            else:
                newfeats[k] = newnewfeats[k]

    return newfeats
def merge_adjacent_objects(hfp):

    params = hfp.get_params()
    thisparams = params['merge_adjacent_objects']

    numberbysize = thisparams['numberbysize']
    numberbyrandom = thisparams['numberbyrandom']
    targetnames = params['largeobjmnames']

    # Get only the relevant labels
    labels = lib.unique(hfp['largeobj'])
    hfp.logging('labels = {}', labels)

    # Seed the randomize function
    random.seed(thisparams['seed'])

    hfp.astype(np.uint32, keys='largeobj')
    (grag, rag) = graphs.gridRegionAdjacencyGraph(hfp['largeobj'], ignoreLabel=0)
    edge_ids = rag.edgeIds()
    # hfp.logging('Edge ids: {}', edge_ids)

    # Type 1:
    # Select edges by size (smallest edges)
    hfp.logging('Number of edgeLengths = {}', len(rag.edgeLengths()))
    edgelen_ids = dict(zip(edge_ids, rag.edgeLengths()))
    # hfp.logging('edgelen_ids = {}', edgelen_ids)
    sorted_edgelens = np.sort(rag.edgeLengths())

    smallest_merge_lens = sorted_edgelens[0:numberbysize]
    hfp.logging('Lengths selected for merging: {}', smallest_merge_lens)

    # index() returns the first edge with a given length, so pop each found
    # edge to keep duplicate lengths from selecting the same edge twice
    smallest_merge_ids = []
    for x in smallest_merge_lens:
        edge_id = edgelen_ids.keys()[edgelen_ids.values().index(x)]
        smallest_merge_ids.append(edge_id)
        edgelen_ids.pop(edge_id)

    edge_ids = edgelen_ids.keys()
    hfp.logging('Edge IDs selected for merging due to size: {}', smallest_merge_ids)

    # Type 2:
    # Randomly choose edges
    random_merge_ids = random.sample(edge_ids, numberbyrandom)
    hfp.logging('Edge IDs randomly selected for merging: {}', random_merge_ids)

    # Now get the label ids
    smallest_merge_labelids_u = [rag.uId(rag.edgeFromId(x)) for x in smallest_merge_ids]
    smallest_merge_labelids_v = [rag.vId(rag.edgeFromId(x)) for x in smallest_merge_ids]
    smallest_merge_labelids = list(zip(smallest_merge_labelids_u, smallest_merge_labelids_v))
    random_merge_labelids_u = [rag.uId(rag.edgeFromId(x)) for x in random_merge_ids]
    random_merge_labelids_v = [rag.vId(rag.edgeFromId(x)) for x in random_merge_ids]
    random_merge_labelids = list(zip(random_merge_labelids_u, random_merge_labelids_v))
    hfp.logging('Label IDs selected for merging by size: {}', smallest_merge_labelids)
    hfp.logging('Label IDs randomly selected for merging: {}', random_merge_labelids)

    # Concatenate
    all_merge_labelids = smallest_merge_labelids + random_merge_labelids
    # Sort
    hfp.logging('all_merge_labelids = {}', all_merge_labelids)
    all_merge_labelids = [sorted(x) for x in all_merge_labelids]
    all_merge_labelids = sorted(all_merge_labelids)
    hfp.logging('all_merge_labelids = {}', all_merge_labelids)

    # Store this for later use
    hfp[targetnames[1]] = smallest_merge_labelids
    hfp[targetnames[2]] = random_merge_labelids
    hfp[targetnames[3]] = all_merge_labelids

    # Create the change hash: each label initially maps to itself
    change_hash = IPL(data=dict(zip(np.unique(all_merge_labelids),
                                    [[x] for x in np.unique(all_merge_labelids)])))
    for i in xrange(0, 3):
        prev_change_hash = IPL(data=change_hash)
        for x in all_merge_labelids:
            hfp.logging('Adding {} and {}', *x)
            change_hash[x[0]] += change_hash[x[1]]
            change_hash[x[0]] = list(np.unique(change_hash[x[0]]))
            change_hash[x[1]] += change_hash[x[0]]
            change_hash[x[1]] = list(np.unique(change_hash[x[1]]))
    # Remove redundancy from the hash: if a label occurs in another label's
    # merge list, drop its own entry and restart the scan
    def reduce_hash(chash):
        br = False
        for k, v in chash.iteritems():
            for x in v:
                if x != k:
                    if x in chash.keys():
                        del chash[x]
                        reduce_hash(chash)
                        br = True
                        break
                    else:
                        br = False
            if br:
                break
    reduce_hash(change_hash)
    # The hash now holds one entry per merge group; iterate over it to relabel the image
    hfp.logging('change_hash after change:')
    hfp.logging(change_hash)
    hfp[targetnames[4]] = change_hash

    # Create the merged image
    # hfp.deepcopy_entry('largeobj', targetnames[0])
    hfp.rename_entry('largeobj', targetnames[0])
    for k, v in change_hash.iteritems():
        for x in v:
            if x != k:
                hfp.logging('Setting {} to {}!', x, k)
                hfp.filter_values(x, type='eq', setto=k, keys=targetnames[0])

# --- Orphaned fragment: the enclosing function definition is missing from the source. ---
        #                   tkeys='{}.{}.{}'.format('result_true', k, k2))

        hfp.pop('disttransf')
        hfp.pop('disttransfm')
        hfp.pop('result_false')
        # hfp.pop('result_true')
        hfp.pop('true')
        hfp.pop('false')

        hfp.logging('hfp datastructure:\n---\n{}---',
                    hfp.datastructure2string(maxdepth=2))

        print hfp['result.6720_13067.8']

        y = []
        for k, v in hfp.iteritems():

            if v:

                try:
                    y.append(v)
                    x = range(0, len(v))
                    plt.plot(x, v)

                    # plt.show()
                    lab.savefig(params['intermedfolder'] + 'plots/' + k +
                                '.png')
                    plt.clf()
                except ValueError:
                    pass
Example 7
def merge_adjacent_objects(
        ipl, key, numberbysize=0, numberbyrandom=0, seed=None,
        targetnames=('largeobjm', 'mergeids_small', 'mergeids_random',
                     'mergeids_all', 'change_hash'),
        algorithm='standard'
):
    """
    :param ipl: IPL instance containing the labelled image at 'key'

    ipl.get_params():

        merge_adjacent_objects
            seed
            numberbysize
            numberbyrandom

        largeobjmnames
            - 'largeobj_merged'
            - 'mergeids_small'
            - 'mergeids_random'
            - 'mergeids_all'
            - 'change_hash'

    :param key: the source key for calculation
    """

    # Remove redundancy from the hash: if a label occurs in another label's
    # merge list, drop its own entry and restart the scan
    def reduce_hash(chash):
        br = False
        for k, v in chash.iteritems():
            for x in v:
                if x != k:
                    if x in chash.keys():
                        del chash[x]
                        reduce_hash(chash)
                        br = True
                        break
                    else:
                        br = False
            if br:
                break

    if algorithm == 'pairs':

        # Get only the relevant labels
        labels = lib.unique(ipl[key])
        ipl.logging('labels = {}', labels)

        # Seed the randomize function (fall back to system entropy);
        # 'is not None' so that an explicit seed of 0 is honoured
        if seed is not None:
            random.seed(seed)
        else:
            random.seed()

        ipl.astype(np.uint32, keys=key)
        (grag, rag) = graphs.gridRegionAdjacencyGraph(ipl[key], ignoreLabel=0)
        edge_ids = rag.edgeIds()

        merge_ids = []
        used_ids = ()

        # Calculate the merges such that only pairs are found
        for i in xrange(0, numberbyrandom):

            c = 0
            finished = False
            while not finished:

                edge_id = random.choice(edge_ids)
                uid = rag.uId(rag.edgeFromId(edge_id))
                vid = rag.vId(rag.edgeFromId(edge_id))

                c += 1
                if c > 50:
                    ipl.logging('merge_adjacent_objects: Warning: Not finding any more pairs!')
                    finished = True

                elif uid not in used_ids and vid not in used_ids:
                    finished = True
                    used_ids += (uid, vid)
                    merge_ids.append((uid, vid))

        ipl.logging('Label IDs randomly selected for merging: {}', merge_ids)

        # Sort
        merge_ids = [sorted(x) for x in merge_ids]
        merge_ids = sorted(merge_ids)
        ipl.logging('merge_ids = {}', merge_ids)

        # Store this for later use
        ipl[targetnames[2]] = merge_ids
        ipl[targetnames[3]] = merge_ids

        # # Create change hash list
        # change_hash = IPL(data=dict(zip(np.unique(merge_ids), [[x, ] for x in np.unique(merge_ids)])))
        # for i in xrange(0, 3):
        #     prev_change_hash = IPL(data=change_hash)
        #     for x in merge_ids:
        #         ipl.logging('Adding {} and {}', *x)
        #         change_hash[x[0]] += change_hash[x[1]]
        #         change_hash[x[0]] = list(np.unique(change_hash[x[0]]))
        #         change_hash[x[1]] += change_hash[x[0]]
        #         change_hash[x[1]] = list(np.unique(change_hash[x[1]]))
        #
        # reduce_hash(change_hash)
        # # Change the list in the hash to np-arrays for better storage in h5 files
        # for k, v in change_hash.iteritems():
        #     change_hash[k] = np.array(v)
        # # And now we have a perfect change list which we just need to iterate over and change the labels in the image
        # ipl.logging('change_hash after change:')

        us = [x[0] for x in merge_ids]
        change_hash = IPL(data=dict(zip(us, merge_ids)))

        ipl.logging('change_hash: {}', change_hash)
        ipl[targetnames[4]] = change_hash

    elif algorithm == 'standard':

        # Get only the relevant labels
        labels = lib.unique(ipl[key])
        ipl.logging('labels = {}', labels)

        # Seed the randomize function
        random.seed(seed)

        ipl.astype(np.uint32, keys=key)
        (grag, rag) = graphs.gridRegionAdjacencyGraph(ipl[key], ignoreLabel=0)
        edge_ids = rag.edgeIds()
        # ipl.logging('Edge ids: {}', edge_ids)

        # Type 1:
        # Select edges by size (smallest edges)
        ipl.logging('Number of edgeLengths = {}', len(rag.edgeLengths()))
        edgelen_ids = dict(zip(edge_ids, rag.edgeLengths()))
        # ipl.logging('edgelen_ids = {}', edgelen_ids)
        sorted_edgelens = np.sort(rag.edgeLengths())

        smallest_merge_lens = sorted_edgelens[0:numberbysize]
        ipl.logging('Lengths selected for merging: {}', smallest_merge_lens)

        smallest_merge_ids = []
        for x in smallest_merge_lens:
            edge_id = edgelen_ids.keys()[edgelen_ids.values().index(x)]
            smallest_merge_ids.append(edge_id)
            edgelen_ids.pop(edge_id)

        edge_ids = edgelen_ids.keys()
        ipl.logging('Edge IDs selected for merging due to size: {}', smallest_merge_ids)

        # Type 2:
        # Randomly choose edges
        random_merge_ids = random.sample(edge_ids, numberbyrandom)
        ipl.logging('Edge IDs randomly selected for merging: {}', random_merge_ids)

        # Now get the label ids
        smallest_merge_labelids_u = [rag.uId(rag.edgeFromId(x)) for x in smallest_merge_ids]
        smallest_merge_labelids_v = [rag.vId(rag.edgeFromId(x)) for x in smallest_merge_ids]
        smallest_merge_labelids = list(zip(smallest_merge_labelids_u, smallest_merge_labelids_v))
        random_merge_labelids_u = [rag.uId(rag.edgeFromId(x)) for x in random_merge_ids]
        random_merge_labelids_v = [rag.vId(rag.edgeFromId(x)) for x in random_merge_ids]
        random_merge_labelids = list(zip(random_merge_labelids_u, random_merge_labelids_v))
        ipl.logging('Label IDs selected for merging by size: {}', smallest_merge_labelids)
        ipl.logging('Label IDs randomly selected for merging: {}', random_merge_labelids)

        # Concatenate
        all_merge_labelids = smallest_merge_labelids + random_merge_labelids
        # Sort
        ipl.logging('all_merge_labelids = {}', all_merge_labelids)
        all_merge_labelids = [sorted(x) for x in all_merge_labelids]
        all_merge_labelids = sorted(all_merge_labelids)
        ipl.logging('all_merge_labelids = {}', all_merge_labelids)

        # Store this for later use
        ipl[targetnames[1]] = smallest_merge_labelids
        ipl[targetnames[2]] = random_merge_labelids
        ipl[targetnames[3]] = all_merge_labelids

        # Create the change hash: each label initially maps to itself
        change_hash = IPL(data=dict(zip(np.unique(all_merge_labelids),
                                        [[x] for x in np.unique(all_merge_labelids)])))
        for i in xrange(0, 3):
            prev_change_hash = IPL(data=change_hash)
            for x in all_merge_labelids:
                ipl.logging('Adding {} and {}', *x)
                change_hash[x[0]] += change_hash[x[1]]
                change_hash[x[0]] = list(np.unique(change_hash[x[0]]))
                change_hash[x[1]] += change_hash[x[0]]
                change_hash[x[1]] = list(np.unique(change_hash[x[1]]))
        # This removes the redundancy from the hash
        reduce_hash(change_hash)
        # Change the list in the hash to np-arrays for better storage in h5 files
        for k, v in change_hash.iteritems():
            change_hash[k] = np.array(v)
        # The hash now holds one entry per merge group; iterate over it to relabel the image
        ipl.logging('change_hash after change:')
        ipl.logging(change_hash)
        ipl[targetnames[4]] = change_hash

    # Create the merged image
    # ipl.deepcopy_entry('largeobj', targetnames[0])
    ipl.rename_entry(key, targetnames[0])
    for k, v in change_hash.iteritems():
        for x in v:
            if x != k:
                ipl.logging('Setting {} to {}!', x, k)
                ipl.filter_values(x, type='eq', setto=k, keys=targetnames[0])

    return ipl
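
# A self-contained walk-through of the change-hash mechanics above, using
# plain dicts and a toy numpy label array (the merge pairs are made up):
def _demo_change_hash():
    merge_ids = [[1, 2], [2, 3], [5, 6]]

    # Each label initially maps to itself
    change_hash = dict((x, [x]) for x in np.unique(merge_ids))
    # Repeat to propagate transitive merges, e.g. 1-2 and 2-3 -> {1, 2, 3}
    for _ in xrange(3):
        for u, v in merge_ids:
            change_hash[u] = sorted(set(change_hash[u] + change_hash[v]))
            change_hash[v] = sorted(set(change_hash[v] + change_hash[u]))

    # Remove redundancy: keep one representative key per merge group
    for k in sorted(change_hash):
        if k not in change_hash:
            continue  # already absorbed into an earlier group
        for x in change_hash[k]:
            if x != k:
                change_hash.pop(x, None)
    print(change_hash)  # {1: [1, 2, 3], 5: [5, 6]}

    # Relabel a toy image: every group member becomes its representative
    img = np.array([1, 2, 3, 4, 5, 6], dtype=np.uint32)
    for k, members in change_hash.items():
        for x in members:
            if x != k:
                img[img == x] = k
    print(img)  # [1 1 1 4 5 5]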
def merge_adjacent_objects(ipl, key, thisparams):
    """
    :param ipl: IPL instance containing the labelled image at 'key'

    ipl.get_params():

        merge_adjacent_objects
            seed
            numberbysize
            numberbyrandom

        largeobjmnames
            - 'largeobj_merged'
            - 'mergeids_small'
            - 'mergeids_random'
            - 'mergeids_all'
            - 'change_hash'

    :param key: the source key for calculation
    :param thisparams: parameter sub-dict providing 'seed', 'numberbysize' and 'numberbyrandom'
    """

    params = ipl.get_params()
    numberbysize = thisparams['numberbysize']
    numberbyrandom = thisparams['numberbyrandom']
    targetnames = params['largeobjmnames']

    # Get only the relevant labels
    labels = lib.unique(ipl[key])
    ipl.logging('labels = {}', labels)

    # Seed the randomize function
    random.seed(thisparams['seed'])

    ipl.astype(np.uint32, keys=key)
    (grag, rag) = graphs.gridRegionAdjacencyGraph(ipl[key], ignoreLabel=0)
    edge_ids = rag.edgeIds()
    # ipl.logging('Edge ids: {}', edge_ids)

    # Type 1:
    # Select edges by size (smallest edges)
    ipl.logging('Number of edgeLengths = {}', len(rag.edgeLengths()))
    edgelen_ids = dict(zip(edge_ids, rag.edgeLengths()))
    # ipl.logging('edgelen_ids = {}', edgelen_ids)
    sorted_edgelens = np.sort(rag.edgeLengths())

    smallest_merge_lens = sorted_edgelens[0:numberbysize]
    ipl.logging('Lengths selected for merging: {}', smallest_merge_lens)

    smallest_merge_ids = []
    for x in smallest_merge_lens:
        edge_id = edgelen_ids.keys()[edgelen_ids.values().index(x)]
        smallest_merge_ids.append(edge_id)
        edgelen_ids.pop(edge_id)

    edge_ids = edgelen_ids.keys()
    ipl.logging('Edge IDs selected for merging due to size: {}',
                smallest_merge_ids)

    # Type 2:
    # Randomly choose edges
    random_merge_ids = random.sample(edge_ids, numberbyrandom)
    ipl.logging('Edge IDs randomly selected for merging: {}', random_merge_ids)

    # Now get the label ids
    smallest_merge_labelids_u = [
        rag.uId(rag.edgeFromId(x)) for x in smallest_merge_ids
    ]
    smallest_merge_labelids_v = [
        rag.vId(rag.edgeFromId(x)) for x in smallest_merge_ids
    ]
    smallest_merge_labelids = list(
        zip(smallest_merge_labelids_u, smallest_merge_labelids_v))
    random_merge_labelids_u = [
        rag.uId(rag.edgeFromId(x)) for x in random_merge_ids
    ]
    random_merge_labelids_v = [
        rag.vId(rag.edgeFromId(x)) for x in random_merge_ids
    ]
    random_merge_labelids = list(
        zip(random_merge_labelids_u, random_merge_labelids_v))
    ipl.logging('Label IDs selected for merging by size: {}',
                smallest_merge_labelids)
    ipl.logging('Label IDs randomly selected for merging: {}',
                random_merge_labelids)

    # Concatenate
    all_merge_labelids = smallest_merge_labelids + random_merge_labelids
    # Sort
    ipl.logging('all_merge_labelids = {}', all_merge_labelids)
    all_merge_labelids = [sorted(x) for x in all_merge_labelids]
    all_merge_labelids = sorted(all_merge_labelids)
    ipl.logging('all_merge_labelids = {}', all_merge_labelids)

    # Store this for later use
    ipl[targetnames[1]] = smallest_merge_labelids
    ipl[targetnames[2]] = random_merge_labelids
    ipl[targetnames[3]] = all_merge_labelids

    # Create the change hash: each label initially maps to itself
    change_hash = IPL(data=dict(zip(np.unique(all_merge_labelids),
                                    [[x] for x in np.unique(all_merge_labelids)])))
    for i in xrange(0, 3):
        prev_change_hash = IPL(data=change_hash)
        for x in all_merge_labelids:
            ipl.logging('Adding {} and {}', *x)
            change_hash[x[0]] += change_hash[x[1]]
            change_hash[x[0]] = list(np.unique(change_hash[x[0]]))
            change_hash[x[1]] += change_hash[x[0]]
            change_hash[x[1]] = list(np.unique(change_hash[x[1]]))
    # Remove redundancy from the hash: if a label occurs in another label's
    # merge list, drop its own entry and restart the scan
    def reduce_hash(chash):
        br = False
        for k, v in chash.iteritems():
            for x in v:
                if x != k:
                    if x in chash.keys():
                        del chash[x]
                        reduce_hash(chash)
                        br = True
                        break
                    else:
                        br = False
            if br:
                break

    reduce_hash(change_hash)
    # Change the list in the hash to np-arrays for better storage in h5 files
    for k, v in change_hash.iteritems():
        change_hash[k] = np.array(v)
    # The hash now holds one entry per merge group; iterate over it to relabel the image
    ipl.logging('change_hash after change:')
    ipl.logging(change_hash)
    ipl[targetnames[4]] = change_hash

    # Create the merged image
    # ipl.deepcopy_entry('largeobj', targetnames[0])
    ipl.rename_entry(key, targetnames[0])
    for k, v in change_hash.iteritems():
        for x in v:
            if x != k:
                ipl.logging('Setting {} to {}!', x, k)
                ipl.filter_values(x, type='eq', setto=k, keys=targetnames[0])

    return ipl
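
# The smallest-edge selection above relies on index() returning the FIRST
# edge of a given length; popping each found edge keeps duplicate lengths
# from selecting the same edge twice. A self-contained illustration with
# made-up edge ids and lengths:
def _demo_smallest_edge_selection():
    edge_ids = [10, 11, 12, 13]
    edge_lens = [4, 2, 2, 7]
    edgelen_ids = dict(zip(edge_ids, edge_lens))

    smallest_merge_ids = []
    for length in sorted(edge_lens)[:2]:  # numberbysize = 2
        edge_id = list(edgelen_ids.keys())[list(edgelen_ids.values()).index(length)]
        smallest_merge_ids.append(edge_id)
        edgelen_ids.pop(edge_id)

    print(sorted(smallest_merge_ids))  # two distinct edges of length 2: [11, 12]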

# --- Orphaned fragment: the enclosing function definition is missing from the source. ---
        #                   tkeys='{}.{}.{}'.format('result_false', k, k2))

        hfp.pop('disttransf')
        hfp.pop('disttransfm')
        # hfp.pop('result_false')
        # hfp.pop('result_true')
        hfp.pop('true')
        hfp.pop('false')
        hfp.pop('raw')

        hfp.logging('hfp datastructure:\n---\n{}---', hfp.datastructure2string(maxdepth=2))

        # print hfp['result.6720_13067.8']

        y = []
        for k, v in hfp.iteritems():

            if v.values()[0]:

                try:
                    # y.append(v)
                    x = range(0, len(v.values()[0]))
                    y = np.swapaxes(np.array(v.values()), 0, 1)
                    plt.plot(x, y)

                    # plt.show()
                    lab.savefig(params['intermedfolder'] + 'plots/' + k + '.png')
                    plt.clf()
                except ValueError:
                    pass