Example #1
0
def cluster_derts(dert__, crit__, mask, verbose=False, **kwargs):
    """Cluster derts into CDeepBlob blobs via flood_fill, from a precomputed criterion.

    Parameters:
        dert__: per-pixel dert arrays, forwarded to flood_fill.
        crit__: clustering-criterion array; blobs are formed over crit__ > 0.
        mask: mask of invalid derts, forwarded to flood_fill.
        verbose: print progress inside flood_fill.
        **kwargs: 'use_c' (unimplemented C path), 'render' (visualize blobs),
                  and optionally 'fca' / 'fia' flags, used only in the window title.

    Returns:
        blob_: list of formed blobs.
    """
    # this function should be folded into flood_fill()
    ''' obsolete define clustering criterion:
    if fia:      # input is from comp_a
        if fca:  # comp_a eval by g / cos(ga)
            crit__ = dert__[3] / np.cos(dert__[7]) - Ave  # combined value, no comp_r: no use for ma?
        else:    # P_blobs eval by g * cos(ga)
            crit__ = dert__[3] * np.cos(dert__[7]) - Ave  # separate from g and ga
    else:        # input is from comp_r
        if fca:  # comp_a eval by g
            crit__ = dert__[3] - Ave
        else:    # comp_r eval by m
            crit__ = dert__[4] - Ave
    '''
    if kwargs.get('use_c'):
        # C implementation is not available; the original had an unreachable
        # flood_fill() call after this raise, kept here as documentation only:
        raise NotImplementedError
        # (_, _, _, blob_, _), idmap, adj_pairs = flood_fill()
    else:
        blob_, idmap, adj_pairs = flood_fill(dert__,
                                             sign__=crit__ > 0,
                                             verbose=verbose,
                                             mask=mask,
                                             blob_cls=CDeepBlob,
                                             accum_func=accum_blob_Dert)

    assign_adjacents(adj_pairs, CDeepBlob)
    if kwargs.get('render', False):
        # fca / fia are not parameters of this function; the original read
        # undefined names here (NameError when render=True). Read them from
        # kwargs instead — callers that pass them get the same title:
        visualize_blobs(idmap,
                        blob_,
                        winname=f"Deep blobs (fca = {kwargs.get('fca')}, fia = {kwargs.get('fia')})")

    return blob_
Example #2
0
def derts2blobs(dert__, verbose=False, render=False, use_c=False):
    """Form a frame of blobs from dert__ via flood_fill.

    Accumulates frame-level Dert params (I, Dy, Dx, G, M) over all blobs,
    optionally timing, rendering, or dispatching to the wrapped C path.
    """
    if verbose:
        start_time = time()

    if use_c:
        # C path expects a padded dert tuple layout:
        dert__ = dert__[0], np.empty(0), np.empty(0), *dert__[1:], np.empty(0)
        frame, idmap, adj_pairs = wrapped_flood_fill(dert__)
    else:
        blob_, idmap, adj_pairs = flood_fill(dert__,
                                             sign__=dert__[3] > 0,
                                             verbose=verbose)
        # frame-level params are the sums of per-blob Dert params:
        I = sum(blob.Dert.I for blob in blob_)
        Dy = sum(blob.Dert.Dy for blob in blob_)
        Dx = sum(blob.Dert.Dx for blob in blob_)
        G = sum(blob.Dert.G for blob in blob_)
        M = sum(blob.Dert.M for blob in blob_)
        frame = FrameOfBlobs(I=I, Dy=Dy, Dx=Dx, G=G, M=M,
                             blob_=blob_, dert__=dert__)

    assign_adjacents(adj_pairs)  # f_segment_by_direction=False

    if verbose:
        print(
            f"{len(frame.blob_)} blobs formed in {time() - start_time} seconds"
        )
    if render:
        visualize_blobs(idmap, frame.blob_)

    return frame
Example #3
0
def cluster_derts(dert__, mask, Ave, fcr, fig, verbose=False, **kwargs):
    """Form a clustering criterion from dert__ per fork flags, then flood_fill blobs.

    Parameters:
        dert__: per-pixel dert arrays.
        mask: mask of invalid derts, forwarded to flood_fill.
        Ave: filter subtracted from the evaluated param to form crit__.
        fcr: input is comp_r output (else comp_g output).
        fig: comp_r input was derived from graylevel i (eval by i + m).
        verbose: print progress inside flood_fill.
        **kwargs: 'use_c' (unimplemented C path), 'render' (visualize blobs).

    Returns:
        blob_: list of formed CDeepBlob blobs.
    """
    if fcr:  # comp_r output; form clustering criterion:
        if fig:
            # eval by i + m, accumulated in rng; dert__[:,:,0] if not transposed:
            crit__ = dert__[0] + dert__[6] - Ave
        else:
            crit__ = Ave - dert__[3]  # eval by -g, accumulated in rng
    else:  # comp_g output
        # comp_g output eval by m, or clustering is always by m?
        crit__ = dert__[6] - Ave

    if kwargs.get('use_c'):
        # C implementation is not available; the original had an unreachable
        # flood_fill() call after this raise, kept here as documentation only:
        raise NotImplementedError
        # (_, _, _, blob_, _), idmap, adj_pairs = flood_fill()
    else:
        blob_, idmap, adj_pairs = flood_fill(dert__,
                                             sign__=crit__ > 0,
                                             verbose=verbose,
                                             mask=mask,
                                             blob_cls=CDeepBlob,
                                             accum_func=accum_blob_Dert)

    assign_adjacents(adj_pairs, CDeepBlob)
    if kwargs.get('render', False):
        visualize_blobs(idmap,
                        blob_,
                        winname=f"Deep blobs (fcr = {fcr}, fig = {fig})")

    return blob_
def segment_by_direction(iblob, **kwargs):
    """Segment iblob into directional sub-blobs, merge weak adjacents, slice strong ones.

    Flood-fills iblob's derts by sign of |dy| > |dx| (primarily vertical vs
    horizontal gradient), recursively merges weak adjacent direction-blobs,
    then slices blobs with sufficient match (M) into derPs.

    Parameters:
        iblob: parent blob; its dir_blobs list is populated in place.
        **kwargs: 'verbose' (print progress), 'render' (visualize merging).
    """
    verbose = kwargs.get('verbose')
    render = kwargs.get('render')

    dert__ = list(iblob.dert__)
    mask__ = iblob.mask__
    dy__ = dert__[1]
    dx__ = dert__[2]

    # segment blob into primarily vertical and horizontal sub blobs according
    # to the direction of kernel-level gradient:
    dir_blob_, idmap, adj_pairs = \
        flood_fill(dert__, abs(dy__) > abs(dx__), verbose=verbose, mask__=mask__, blob_cls=CBlob, fseg=True, accum_func=accum_dir_blob_Dert)
    assign_adjacents(adj_pairs, CBlob)  # fseg=True: skip adding the pose

    if render:
        # copy of dir blobs before merging, for visualization purposes only:
        _dir_blob_ = deepcopy(dir_blob_)

    merged_ids = []  # ids of merged adjacent blobs, to skip in the rest of dir_blobs

    for i, blob in enumerate(dir_blob_):
        if blob.id not in merged_ids:
            blob = merge_adjacents_recursive(blob,
                                             merged_ids,
                                             blob.adj_blobs[0],
                                             strong_adj_blobs=[])  # no pose

            # y size > 1, else we can't form derP:
            if (blob.Dert.M > ave_M) and (blob.box[1] - blob.box[0] > 1):
                blob.fsliced = True
                # slice and comp_slice_ across directional sub-blob:
                slice_blob(blob, verbose)
            iblob.dir_blobs.append(blob)

        # remove blobs that were merged into another blob. Iterate over a
        # copy: the original removed from iblob.dir_blobs while iterating it,
        # which skips the element following each removal:
        for dir_blob in list(iblob.dir_blobs):
            if dir_blob.id in merged_ids:
                iblob.dir_blobs.remove(dir_blob)

        if render:
            visualize_merging_process(iblob, dir_blob_, _dir_blob_, mask__, i)

    if render:
        # for debugging: visualize adjacents of merged blob to see that
        # adjacents are assigned correctly after the merging:
        if 50 < len(dir_blob_) < 500:
            new_idmap = (np.zeros_like(idmap).astype('int')) - 2
            for blob in iblob.dir_blobs:
                y0, yn, x0, xn = blob.box
                new_idmap[y0:yn, x0:xn] += (~blob.mask__) * (blob.id + 2)

            visualize_merging_process(iblob, dir_blob_, _dir_blob_, mask__, 0)
            from draw_frame_blobs import visualize_blobs
            visualize_blobs(new_idmap, iblob.dir_blobs)
Example #5
0
def cluster_sub_eval(blob, dert__, sign__, mask__, render,
                     verbose):  # comp_r or comp_a eval per sub_blob:
    """Flood-fill sub-blobs of blob, then evaluate each for the p, a, or r fork.

    Parameters:
        blob: parent blob; Ls and sublayers are set on it in place.
        dert__: per-pixel dert arrays for the sub flood_fill.
        sign__: sign array partitioning the sub-blobs.
        mask__: mask of invalid derts.
        render: visualize the formed sub-blobs.
        verbose: print fork-selection progress.
    """
    AveB = aveB * blob.rdn
    sub_blobs, idmap, adj_pairs = flood_fill(dert__,
                                             sign__,
                                             verbose=False,
                                             mask__=mask__,
                                             blob_cls=CBlob)
    assign_adjacents(adj_pairs, CBlob)

    if render:
        visualize_blobs(
            idmap,
            sub_blobs,
            winname=
            f"Deep blobs (f_comp_a = {blob.f_comp_a}, f_root_a = {blob.prior_forks[-1] == 'a'})"
        )

    blob.Ls = len(sub_blobs)  # for visibility and next-fork rdn
    blob.sublayers = [sub_blobs]  # 1st layer of sub_blobs

    for sub_blob in sub_blobs:  # evaluate sub_blob
        # increments forking sequence: m->r, g->a, a->p:
        sub_blob.prior_forks = blob.prior_forks.copy()
        # min size in y and x, at least one dert in dert__:
        if sub_blob.mask__.shape[0] > 2 and sub_blob.mask__.shape[
                1] > 2 and False in sub_blob.mask__:
            # p fork:
            if sub_blob.prior_forks[-1] == 'a':
                if -sub_blob.M * sub_blob.Ma > AveB * pcoef:
                    sub_blob.prior_forks.extend('p')
                    if verbose: print('\nslice_blob fork\n')
                    # pass the caller's verbose flag through; the original
                    # hardcoded verbose=True, ignoring this function's param:
                    segment_by_direction(sub_blob, verbose=verbose)
            else:  # a fork or r fork:
                '''
                G = blob.G  # Gr, Grr...
                adj_M = blob.adj_blobs[3]  # adj_M is incomplete, computed within current dert_only, use root blobs instead:
                adjacent valuable blobs of any sign are tracked from frame_blobs to form borrow_M?
                track adjacency of sub_blobs: wrong sub-type but right macro-type: flat blobs of greater range?
                G indicates or dert__ extend per blob G?
                borrow_M = min(G, adj_M / 2): usually not available, use average
                '''
                if -sub_blob.M > AveB:  # replace with borrow_M when known
                    # comp_a:
                    sub_blob.rdn = sub_blob.rdn + 1 + 1 / blob.Ls
                    blob.sublayers += comp_angle(sub_blob, render, verbose)

                elif sub_blob.M > AveB:
                    # comp_r:
                    sub_blob.rng = blob.rng
                    sub_blob.rdn = sub_blob.rdn + 1 + 1 / blob.Ls
                    blob.sublayers += comp_range(sub_blob, render, verbose)
Example #6
0
def cluster_sub_eval(blob, dert__, sign__, mask__,
                     **kwargs):  # comp_r or comp_a eval per sub_blob:
    """Flood-fill sub-blobs of blob, then evaluate each for comp_a or comp_r recursion.

    Sets blob.Ls and blob.sub_layers in place; strong sub-blobs recurse via
    intra_blob with incremented rdn and extended prior_forks.
    """
    AveB = aveB * blob.rdn

    sub_blobs, idmap, adj_pairs = flood_fill(dert__, sign__,
                                             verbose=False,
                                             mask__=mask__,
                                             blob_cls=CBlob,
                                             accum_func=accum_blob_Dert)
    assign_adjacents(adj_pairs, CBlob)

    if kwargs.get('render', False):
        visualize_blobs(
            idmap,
            sub_blobs,
            winname=
            f"Deep blobs (f_comp_a = {blob.f_comp_a}, f_root_a = {blob.f_root_a})"
        )

    blob.Ls = len(sub_blobs)  # for visibility and next-fork rdn
    blob.sub_layers = [sub_blobs]  # 1st layer of sub_blobs

    for sub_blob in sub_blobs:  # evaluate each sub_blob
        root_G = blob.G  # Gr, Grr...
        # adj_M is incomplete, computed within current dert_only, use root blobs instead:
        # adjacent valuable blobs of any sign are tracked from frame_blobs to form borrow_M?
        # track adjacency of sub_blobs: wrong sub-type but right macro-type: flat blobs of greater range?
        # G indicates or dert__ extend per blob G?
        root_adj_M = blob.adj_blobs[3]
        borrow_M = min(root_G, root_adj_M / 2)

        # increments forking sequence: g->a, g->a->p, etc.:
        sub_blob.prior_forks = blob.prior_forks.copy()

        if sub_blob.G > AveB:  # comp_a fork; replace with borrow_M when known
            sub_blob.f_root_a = 1
            # accumulate a depth from blob to sub_blob, currently not used:
            sub_blob.a_depth += blob.a_depth
            sub_blob.rdn = sub_blob.rdn + 1 + 1 / blob.Ls
            blob.sub_layers += intra_blob(sub_blob, **kwargs)

        elif sub_blob.M - borrow_M > AveB:  # comp_r fork
            sub_blob.rng = blob.rng * 2
            sub_blob.rdn = sub_blob.rdn + 1 + 1 / blob.Ls
            blob.sub_layers += intra_blob(sub_blob, **kwargs)
Example #7
0
def cluster_fork_recursive(blob, spliced_layers, new_dert__, sign__,
                           new_mask__, verbose, render, fBa):
    """Form sub_blobs for the a or r fork, adjust rdn, recurse via intra_blob_root.

    Parameters:
        blob: parent blob; rdn and a|r sublayers are updated in place.
        spliced_layers: cross-fork layers, extended with this fork's sublayers.
        new_dert__: fork-specific dert arrays.
        sign__: sign array partitioning sub-blobs.
        new_mask__: fork-specific mask, cleared before flood_fill.
        verbose: print fork progress.
        render: visualize the formed sub-blobs.
        fBa: angle fork (else range fork).

    Returns:
        spliced_layers: input layers zipped with this fork's sublayers.
    """
    if verbose:
        if fBa: print('\na fork\n')
        else: print('\nr fork\n')
    # form sub_blobs. Clear the mask first and pass it: ndarray.fill() returns
    # None, so the original's mask__=new_mask__.fill(False) passed mask__=None
    # instead of the cleared mask:
    new_mask__.fill(False)
    sub_blobs, idmap, adj_pairs = flood_fill(new_dert__,
                                             sign__,
                                             verbose=False,
                                             mask__=new_mask__)
    '''
    adjust per average sub_blob, depending on which fork is weaker, or not taken at all:
    sub_blob.rdn += 1 -|+ min(sub_blob_val, alt_blob_val) / max(sub_blob_val, alt_blob_val):
    + if sub_blob_val > alt_blob_val, else -?  
    '''
    # adjust ave cross-layer rdn to actual rdn after flood_fill:
    adj_rdn = ave_nsub - len(sub_blobs)
    blob.rdn += adj_rdn
    for sub_blob in sub_blobs:
        sub_blob.rdn += adj_rdn
    assign_adjacents(adj_pairs)
    if render:
        # second label was duplicated as "froot_Ba" in the original; it shows
        # whether the prior fork was angle:
        visualize_blobs(
            idmap,
            sub_blobs,
            winname=
            f"Deep blobs (froot_Ba = {blob.fBa}, froot_a = {blob.prior_forks[-1] == 'a'})"
        )

    if fBa: sublayers = blob.asublayers
    else: sublayers = blob.rsublayers

    # r|a fork-specific sub_blobs, then add deeper layers of mixed-fork sub_blobs:
    sublayers += [sub_blobs]
    # recursive eval cross-comp range | angle | slice per blob:
    sublayers += intra_blob_root(blob, render, verbose, fBa)

    spliced_layers = [
        spliced_layer + sublayer for spliced_layer, sublayer in zip_longest(
            spliced_layers, sublayers, fillvalue=[])
    ]
    return spliced_layers
Example #8
0
def frame_blobs_root(image,
                     intra=False,
                     render=False,
                     verbose=False,
                     use_c=False):
    """Compute derts from image and flood-fill them into a frame of blobs.

    Optionally times the run, renders the blobs, and recurses into
    intra_blob_root when intra=True.
    """
    if verbose:
        start_time = time()
    dert__ = comp_pixel(image)

    # dert__[3] is g, https://en.wikipedia.org/wiki/Flood_fill
    blob_, idmap, adj_pairs = flood_fill(dert__,
                                         sign__=ave - dert__[3] > 0,
                                         verbose=verbose)
    assign_adjacents(adj_pairs)  # forms adj_blobs per blob in adj_pairs

    # frame-level params are sums over all blobs:
    I = sum(blob.I for blob in blob_)
    Dy = sum(blob.Dy for blob in blob_)
    Dx = sum(blob.Dx for blob in blob_)
    frame = CBlob(I=I,
                  Dy=Dy,
                  Dx=Dx,
                  dert__=dert__,
                  prior_forks=["g"],
                  rlayers=[blob_])  # dlayers = []: no comp_a yet

    if verbose:
        print(
            f"{len(frame.rlayers[0])} blobs formed in {time() - start_time} seconds"
        )
    if render:
        visualize_blobs(idmap, frame.rlayers[0])

    if intra:  # omit for testing frame_blobs without intra_blob
        if verbose:
            print("\rRunning frame's intra_blob...")
        from intra_blob import intra_blob_root
        # recursive eval cross-comp range | angle | slice per blob;
        # sublayers[0] is fork-specific, deeper sublayers combine sub-blobs of both forks:
        frame.rlayers += intra_blob_root(frame, render, verbose, fBa=0)
    '''
    if use_c:  # old version, no longer updated:
        dert__ = dert__[0], np.empty(0), np.empty(0), *dert__[1:], np.empty(0)
        frame, idmap, adj_pairs = wrapped_flood_fill(dert__)
    '''
    return frame