Example #1
0
    def label_iterator(self,
                       key=None,
                       labellist=None,
                       background=None,
                       area=None):
        """
        Iterate over the labels of the image stored under `key`.

        :param key: entry key of the label image in this container
        :param labellist: explicit sequence of labels to iterate over; when
            supplied, `key` and `area` are not consulted
        :param background: background label to exclude from the iteration.
            Passing True excludes the label 0 (the previous behavior, which
            always compared against the constant 0 and ignored the value
            actually passed).
        :param area: supply area in the format area=np.s_[numpy indexing], i.e. area=np.s_[:,:,:] for a full 3d image
            Note that this affects only the determination of which labels are iterated over, when labellist is supplied
            this parameter has no effect
        :return: generator over the selected label values
        """

        if labellist is None:
            labellist = self.unique(keys=key, return_only=True).yield_an_item()
            if area is not None:
                # Restrict the label determination to the requested sub-area
                labellist = lib.unique(self[key][area])

        if background is not None:
            # Bug fix: the original filtered against the constant 0 and
            # ignored the supplied background value. `True` still maps to 0
            # for backward compatibility with flag-style callers.
            bg = 0 if background is True else background
            labellist = filter(lambda x: x != bg, labellist)

        for lbl in labellist:
            yield lbl
    def label_iterator(self, key=None, labellist=None, background=None, area=None):
        """
        Generator over the labels present in the image stored at `key`.

        :param key: entry key of the label image
        :param labellist: explicit labels to iterate over; overrides the
            determination from `key`
        :param background: when set, the label 0 is skipped
        :param area: supply area in the format area=np.s_[numpy indexing], i.e. area=np.s_[:,:,:] for a full 3d image
            Note that this affects only the determination of which labels are iterated over, when labellist is supplied
            this parameter has no effect
        :return: yields one label value at a time
        """

        if labellist is None:
            labellist = self.unique(keys=key, return_only=True)[key]
            # An explicit area restricts which labels are found and
            # supersedes the full-image determination above
            if area is not None:
                labellist = lib.unique(self[key][area])

        if background is not None:
            # Skip the background label (0)
            labellist = (lbl for lbl in labellist if lbl != 0)

        for lbl in labellist:
            yield lbl
def merge_adjacent_objects(hfp):
    """
    Merge pairs of adjacent objects in the label image hfp['largeobj'].

    Settings are read from hfp.get_params():
        merge_adjacent_objects: seed, numberbysize, numberbyrandom
        largeobjmnames: the five storage keys used below, in order:
            merged image, size-selected merge ids, random merge ids,
            all merge ids, change hash

    Side effects: casts hfp['largeobj'] to uint32, renames it to
    targetnames[0] and relabels the merged objects in place; stores the
    selected merge id lists and the change hash under targetnames[1..4].

    NOTE(review): relies on Python-2-only constructs (indexing
    dict.keys(), xrange, iteritems) — not portable to Python 3.
    """

    params = hfp.get_params()
    thisparams = params['merge_adjacent_objects']

    # How many merges to pick by smallest edge size and how many at random
    numberbysize = thisparams['numberbysize']
    numberbyrandom = thisparams['numberbyrandom']
    targetnames = params['largeobjmnames']

    # Get only the relevant labels
    labels = lib.unique(hfp['largeobj'])
    hfp.logging('labels = {}', labels)

    # Seed the randomize function
    random.seed(thisparams['seed'])

    # Build the region adjacency graph of the label image (label 0 ignored)
    hfp.astype(np.uint32, keys='largeobj')
    (grag, rag) = graphs.gridRegionAdjacencyGraph(hfp['largeobj'], ignoreLabel=0)
    edge_ids = rag.edgeIds()
    # hfp.logging('Edge ids: {}', edge_ids)

    # Type 1:
    # Select edges by size (smallest edges)
    hfp.logging('Number of edgeLengths = {}', len(rag.edgeLengths()))
    edgelen_ids = dict(zip(edge_ids, rag.edgeLengths()))
    # ifp.logging('edgelen_ids = {}', edgelen_ids)
    sorted_edgelens = np.sort(rag.edgeLengths())
    #
    smallest_merge_lens = sorted_edgelens[0:numberbysize]
    hfp.logging('Lengths selected for merging: {}', smallest_merge_lens)
    #
    smallest_merge_ids = []
    for x in smallest_merge_lens:
        # Look up an edge with this length and remove it, so duplicate
        # lengths resolve to distinct edges (Python 2: keys()/values()
        # are returned in matching order)
        edge_id = edgelen_ids.keys()[edgelen_ids.values().index(x)]
        smallest_merge_ids.append(edge_id)
        edgelen_ids.pop(edge_id)
    #
    # The remaining edges form the candidate pool for the random selection
    edge_ids = edgelen_ids.keys()
    hfp.logging('Edge IDs selected for merging due to size: {}', smallest_merge_ids)

    # Type 2:
    # Randomly choose edges
    random_merge_ids = random.sample(edge_ids, numberbyrandom)
    hfp.logging('Edge IDs randomly selected for merging: {}', random_merge_ids)

    # Now get the label ids
    smallest_merge_labelids_u = [rag.uId(rag.edgeFromId(x)) for x in smallest_merge_ids]
    smallest_merge_labelids_v = [rag.vId(rag.edgeFromId(x)) for x in smallest_merge_ids]
    smallest_merge_labelids = list(zip(smallest_merge_labelids_u, smallest_merge_labelids_v))
    random_merge_labelids_u = [rag.uId(rag.edgeFromId(x)) for x in random_merge_ids]
    random_merge_labelids_v = [rag.vId(rag.edgeFromId(x)) for x in random_merge_ids]
    random_merge_labelids = list(zip(random_merge_labelids_u, random_merge_labelids_v))
    hfp.logging('Label IDs selected for merging by size: {}', smallest_merge_labelids)
    hfp.logging('Label IDs randomly selected for merging: {}', random_merge_labelids)

    # Concatenate
    all_merge_labelids = smallest_merge_labelids + random_merge_labelids
    # Sort each pair, then the list of pairs
    hfp.logging('all_merge_labelids = {}', all_merge_labelids)
    all_merge_labelids = [sorted(x) for x in all_merge_labelids]
    all_merge_labelids = sorted(all_merge_labelids)
    hfp.logging('all_merge_labelids = {}', all_merge_labelids)

    # Store this for later use
    hfp[targetnames[1]] = smallest_merge_labelids
    hfp[targetnames[2]] = random_merge_labelids
    hfp[targetnames[3]] = all_merge_labelids

    # Create change hash list: start with each label mapping to itself, then
    # union both sides of every merge pair; three passes propagate merges
    # transitively (e.g. chains a-b, b-c)
    change_hash = IPL(data=dict(zip(np.unique(all_merge_labelids), [[x,] for x in np.unique(all_merge_labelids)])))
    for i in xrange(0, 3):
        # NOTE(review): prev_change_hash is assigned but never read
        prev_change_hash = IPL(data=change_hash)
        for x in all_merge_labelids:
            hfp.logging('Adding {} and {}', *x)
            change_hash[x[0]] += change_hash[x[1]]
            change_hash[x[0]] = list(np.unique(change_hash[x[0]]))
            change_hash[x[1]] += change_hash[x[0]]
            change_hash[x[1]] = list(np.unique(change_hash[x[1]]))
    # This removes the redundancy from the hash
    # NOTE(review): shadows the builtin `reduce`; deletes entries while
    # iterating but restarts the iteration via recursion after each delete
    def reduce(hash):
        br = False
        for k, v in hash.iteritems():
            for x in v:
                if x != k:
                    if x in hash.keys():
                        del hash[x]
                        reduce(hash)
                        br = True
                        break
                    else:
                        br = False
            if br:
                break
    reduce(change_hash)
    # And now we have a perfect change list which we just need to iterate over and change the labels in the image
    hfp.logging('change_hash after change:')
    hfp.logging(change_hash)
    hfp[targetnames[4]] = change_hash

    # Create the merged image
    # hfp.deepcopy_entry('largeobj', targetnames[0])
    hfp.rename_entry('largeobj', targetnames[0])
    # Relabel every member of each merge group to the group's key
    for k, v in change_hash.iteritems():
        for x in v:
            if x != k:
                hfp.logging('Setting {} to {}!', x, k)
                hfp.filter_values(x, type='eq', setto=k, keys=targetnames[0])
Example #4
0
def merge_adjacent_objects(
        ipl, key, numberbysize=0, numberbyrandom=0, seed=None,
        targetnames=('largeobjm', 'mergeids_small', 'mergeids_random',
                     'mergeids_all', 'change_hash'),
        algorithm='standard'
):
    """
    Merge pairs of adjacent objects within the label image ipl[key].

    :param ipl: image container (must support item access, astype, logging,
        rename_entry and filter_values as used below)
    :param key: the source key for calculation
    :param numberbysize: how many merges to select by smallest edge length
        ('standard' algorithm only)
    :param numberbyrandom: how many merges to select at random
    :param seed: RNG seed; falsy seeds from the system ('pairs' branch)
    :param targetnames: storage keys for the results, in order:
        merged image, size-selected merge ids, random merge ids,
        all merge ids, change hash.
        Bug fix: the previous default tuple had only four entries while the
        code indexes targetnames[4], so calling with the default raised an
        IndexError; 'mergeids_all' is now included as the fourth entry,
        matching the five names this docstring always listed.
    :param algorithm: 'pairs' (disjoint random pairs only) or 'standard'
        (smallest edges plus random edges, with transitive merge resolution)
    :return: ipl, modified in place
    """

    # This removes the redundancy from the hash: drops every key that also
    # appears inside another key's merge group (restarts via recursion
    # after each deletion because the dict is mutated mid-iteration)
    def reduce_hash(hash):
        br = False
        for k, v in hash.iteritems():
            for x in v:
                if x != k:
                    if x in hash.keys():
                        del hash[x]
                        reduce_hash(hash)
                        br = True
                        break
                    else:
                        br = False
            if br:
                break

    if algorithm == 'pairs':

        # Get only the relevant labels
        labels = lib.unique(ipl[key])
        ipl.logging('labels = {}', labels)

        # Seed the randomize function
        if seed:
            random.seed(seed)
        else:
            random.seed()

        # Build the region adjacency graph (label 0 ignored)
        ipl.astype(np.uint32, keys=key)
        (grag, rag) = graphs.gridRegionAdjacencyGraph(ipl[key], ignoreLabel=0)
        edge_ids = rag.edgeIds()

        merge_ids = []
        used_ids = ()

        # Calculate the merges such that only pairs are found, i.e. no
        # label participates in more than one merge
        for i in xrange(0, numberbyrandom):

            c = 0
            finished = False
            while not finished:

                edge_id = random.choice(edge_ids)
                uid = rag.uId(rag.edgeFromId(edge_id))
                vid = rag.vId(rag.edgeFromId(edge_id))

                c += 1
                if c > 50:
                    # Give up on this pair after 50 failed attempts
                    ipl.logging('merge_adjacent_objects: Warning: Not finding any more pairs!')
                    finished = True

                elif uid not in used_ids and vid not in used_ids:
                    finished = True
                    used_ids += (uid, vid)
                    merge_ids.append((uid, vid))

        ipl.logging('Label IDs randomly selected for merging: {}', merge_ids)

        # Sort each pair, then the list of pairs
        merge_ids = [sorted(x) for x in merge_ids]
        merge_ids = sorted(merge_ids)
        ipl.logging('merge_ids = {}', merge_ids)

        # Store this for later use
        ipl[targetnames[2]] = merge_ids
        ipl[targetnames[3]] = merge_ids

        # Since the pairs are disjoint the change hash simply maps each
        # pair's smaller label to the pair itself
        us = [x[0] for x in merge_ids]
        change_hash = IPL(data=dict(zip(us, merge_ids)))

        ipl.logging('change_hash: {}', change_hash)
        ipl[targetnames[4]] = change_hash

    elif algorithm == 'standard':

        # Get only the relevant labels
        labels = lib.unique(ipl[key])
        ipl.logging('labels = {}', labels)

        # Seed the randomize function
        random.seed(seed)

        # Build the region adjacency graph (label 0 ignored)
        ipl.astype(np.uint32, keys=key)
        (grag, rag) = graphs.gridRegionAdjacencyGraph(ipl[key], ignoreLabel=0)
        edge_ids = rag.edgeIds()
        # ipl.logging('Edge ids: {}', edge_ids)

        # Type 1:
        # Select edges by size (smallest edges)
        ipl.logging('Number of edgeLengths = {}', len(rag.edgeLengths()))
        edgelen_ids = dict(zip(edge_ids, rag.edgeLengths()))
        # ifp.logging('edgelen_ids = {}', edgelen_ids)
        sorted_edgelens = np.sort(rag.edgeLengths())
        #
        smallest_merge_lens = sorted_edgelens[0:numberbysize]
        ipl.logging('Lengths selected for merging: {}', smallest_merge_lens)
        #
        smallest_merge_ids = []
        for x in smallest_merge_lens:
            # Look up an edge with this length and remove it, so duplicate
            # lengths resolve to distinct edges (Python 2 dict ordering)
            edge_id = edgelen_ids.keys()[edgelen_ids.values().index(x)]
            smallest_merge_ids.append(edge_id)
            edgelen_ids.pop(edge_id)
        #
        # The remaining edges are the pool for the random selection
        edge_ids = edgelen_ids.keys()
        ipl.logging('Edge IDs selected for merging due to size: {}', smallest_merge_ids)

        # Type 2:
        # Randomly choose edges
        random_merge_ids = random.sample(edge_ids, numberbyrandom)
        ipl.logging('Edge IDs randomly selected for merging: {}', random_merge_ids)

        # Now get the label ids
        smallest_merge_labelids_u = [rag.uId(rag.edgeFromId(x)) for x in smallest_merge_ids]
        smallest_merge_labelids_v = [rag.vId(rag.edgeFromId(x)) for x in smallest_merge_ids]
        smallest_merge_labelids = list(zip(smallest_merge_labelids_u, smallest_merge_labelids_v))
        random_merge_labelids_u = [rag.uId(rag.edgeFromId(x)) for x in random_merge_ids]
        random_merge_labelids_v = [rag.vId(rag.edgeFromId(x)) for x in random_merge_ids]
        random_merge_labelids = list(zip(random_merge_labelids_u, random_merge_labelids_v))
        ipl.logging('Label IDs selected for merging by size: {}', smallest_merge_labelids)
        ipl.logging('Label IDs randomly selected for merging: {}', random_merge_labelids)

        # Concatenate
        all_merge_labelids = smallest_merge_labelids + random_merge_labelids
        # Sort each pair, then the list of pairs
        ipl.logging('all_merge_labelids = {}', all_merge_labelids)
        all_merge_labelids = [sorted(x) for x in all_merge_labelids]
        all_merge_labelids = sorted(all_merge_labelids)
        ipl.logging('all_merge_labelids = {}', all_merge_labelids)

        # Store this for later use
        ipl[targetnames[1]] = smallest_merge_labelids
        ipl[targetnames[2]] = random_merge_labelids
        ipl[targetnames[3]] = all_merge_labelids

        # Create change hash list: each label starts mapped to itself, then
        # both sides of every merge pair are unioned; three passes
        # propagate merges transitively (e.g. chains a-b, b-c)
        change_hash = IPL(data=dict(zip(np.unique(all_merge_labelids), [[x,] for x in np.unique(all_merge_labelids)])))
        for i in xrange(0, 3):
            # NOTE(review): prev_change_hash is assigned but never read
            prev_change_hash = IPL(data=change_hash)
            for x in all_merge_labelids:
                ipl.logging('Adding {} and {}', *x)
                change_hash[x[0]] += change_hash[x[1]]
                change_hash[x[0]] = list(np.unique(change_hash[x[0]]))
                change_hash[x[1]] += change_hash[x[0]]
                change_hash[x[1]] = list(np.unique(change_hash[x[1]]))
        # This removes the redundancy from the hash
        reduce_hash(change_hash)
        # Change the list in the hash to np-arrays for better storage in h5 files
        for k, v in change_hash.iteritems():
            change_hash[k] = np.array(v)
        # And now we have a perfect change list which we just need to iterate over and change the labels in the image
        ipl.logging('change_hash after change:')
        ipl.logging(change_hash)
        ipl[targetnames[4]] = change_hash

    # Create the merged image
    # ipl.deepcopy_entry('largeobj', targetnames[0])
    ipl.rename_entry(key, targetnames[0])
    # Relabel every member of each merge group to the group's key
    for k, v in change_hash.iteritems():
        for x in v:
            if x != k:
                ipl.logging('Setting {} to {}!', x, k)
                ipl.filter_values(x, type='eq', setto=k, keys=targetnames[0])

    return ipl
def merge_adjacent_objects(ipl, key, thisparams):
    """
    Merge pairs of adjacent objects within the label image ipl[key].

    :param ipl: image container; must support get_params, item access,
        astype, logging, rename_entry and filter_values (see usage below)
    :param key: the source key for calculation
    :param thisparams: the 'merge_adjacent_objects' parameter dict with
        entries 'seed', 'numberbysize' and 'numberbyrandom'

    ipl.get_params() additionally provides:

        largeobjmnames
            - 'largeobj_merged'
            - 'mergeids_small'
            - 'mergeids_random'
            - 'mergeids_all'
            - 'change_hash'

    :return: ipl, modified in place
    """

    numberbysize = thisparams['numberbysize']
    numberbyrandom = thisparams['numberbyrandom']
    # Bug fix: `params` was referenced without ever being defined in this
    # function (NameError on every call); fetch the parameter dictionary
    # from the ipl object as the sibling implementation does.
    params = ipl.get_params()
    targetnames = params['largeobjmnames']

    # Get only the relevant labels
    labels = lib.unique(ipl[key])
    ipl.logging('labels = {}', labels)

    # Seed the randomize function
    random.seed(thisparams['seed'])

    # Build the region adjacency graph of the label image (label 0 ignored)
    ipl.astype(np.uint32, keys=key)
    (grag, rag) = graphs.gridRegionAdjacencyGraph(ipl[key], ignoreLabel=0)
    edge_ids = rag.edgeIds()
    # ipl.logging('Edge ids: {}', edge_ids)

    # Type 1:
    # Select edges by size (smallest edges)
    ipl.logging('Number of edgeLengths = {}', len(rag.edgeLengths()))
    edgelen_ids = dict(zip(edge_ids, rag.edgeLengths()))
    # ifp.logging('edgelen_ids = {}', edgelen_ids)
    sorted_edgelens = np.sort(rag.edgeLengths())
    #
    smallest_merge_lens = sorted_edgelens[0:numberbysize]
    ipl.logging('Lengths selected for merging: {}', smallest_merge_lens)
    #
    smallest_merge_ids = []
    for x in smallest_merge_lens:
        # Look up an edge with this length and remove it, so duplicate
        # lengths resolve to distinct edges (Python 2 dict ordering)
        edge_id = edgelen_ids.keys()[edgelen_ids.values().index(x)]
        smallest_merge_ids.append(edge_id)
        edgelen_ids.pop(edge_id)
    #
    # The remaining edges are the candidate pool for the random selection
    edge_ids = edgelen_ids.keys()
    ipl.logging('Edge IDs selected for merging due to size: {}',
                smallest_merge_ids)

    # Type 2:
    # Randomly choose edges
    random_merge_ids = random.sample(edge_ids, numberbyrandom)
    ipl.logging('Edge IDs randomly selected for merging: {}', random_merge_ids)

    # Now get the label ids
    smallest_merge_labelids_u = [
        rag.uId(rag.edgeFromId(x)) for x in smallest_merge_ids
    ]
    smallest_merge_labelids_v = [
        rag.vId(rag.edgeFromId(x)) for x in smallest_merge_ids
    ]
    smallest_merge_labelids = list(
        zip(smallest_merge_labelids_u, smallest_merge_labelids_v))
    random_merge_labelids_u = [
        rag.uId(rag.edgeFromId(x)) for x in random_merge_ids
    ]
    random_merge_labelids_v = [
        rag.vId(rag.edgeFromId(x)) for x in random_merge_ids
    ]
    random_merge_labelids = list(
        zip(random_merge_labelids_u, random_merge_labelids_v))
    ipl.logging('Label IDs selected for merging by size: {}',
                smallest_merge_labelids)
    ipl.logging('Label IDs randomly selected for merging: {}',
                random_merge_labelids)

    # Concatenate
    all_merge_labelids = smallest_merge_labelids + random_merge_labelids
    # Sort each pair, then the list of pairs
    ipl.logging('all_merge_labelids = {}', all_merge_labelids)
    all_merge_labelids = [sorted(x) for x in all_merge_labelids]
    all_merge_labelids = sorted(all_merge_labelids)
    ipl.logging('all_merge_labelids = {}', all_merge_labelids)

    # Store this for later use
    ipl[targetnames[1]] = smallest_merge_labelids
    ipl[targetnames[2]] = random_merge_labelids
    ipl[targetnames[3]] = all_merge_labelids

    # Create change hash list: each label starts mapped to itself, then both
    # sides of every merge pair are unioned; three passes propagate merges
    # transitively (e.g. chains a-b, b-c)
    change_hash = IPL(data=dict(
        zip(np.unique(all_merge_labelids), [[
            x,
        ] for x in np.unique(all_merge_labelids)])))
    for i in xrange(0, 3):
        # NOTE(review): prev_change_hash is assigned but never read
        prev_change_hash = IPL(data=change_hash)
        for x in all_merge_labelids:
            ipl.logging('Adding {} and {}', *x)
            change_hash[x[0]] += change_hash[x[1]]
            change_hash[x[0]] = list(np.unique(change_hash[x[0]]))
            change_hash[x[1]] += change_hash[x[0]]
            change_hash[x[1]] = list(np.unique(change_hash[x[1]]))

    # This removes the redundancy from the hash: drops every key that also
    # appears inside another key's merge group. Renamed from `reduce` to
    # stop shadowing the builtin, consistent with the sibling variant.
    def reduce_hash(hash):
        br = False
        for k, v in hash.iteritems():
            for x in v:
                if x != k:
                    if x in hash.keys():
                        del hash[x]
                        reduce_hash(hash)
                        br = True
                        break
                    else:
                        br = False
            if br:
                break

    reduce_hash(change_hash)
    # Change the list in the hash to np-arrays for better storage in h5 files
    for k, v in change_hash.iteritems():
        change_hash[k] = np.array(v)
    # And now we have a perfect change list which we just need to iterate over and change the labels in the image
    ipl.logging('change_hash after change:')
    ipl.logging(change_hash)
    ipl[targetnames[4]] = change_hash

    # Create the merged image
    # ipl.deepcopy_entry('largeobj', targetnames[0])
    ipl.rename_entry(key, targetnames[0])
    # Relabel every member of each merge group to the group's key
    for k, v in change_hash.iteritems():
        for x in v:
            if x != k:
                ipl.logging('Setting {} to {}!', x, k)
                ipl.filter_values(x, type='eq', setto=k, keys=targetnames[0])

    return ipl