Example #1
File: features.py  Project: adamgreig/iib
def get_variance(clctx, features, reductions, buf_in):
    """Using the *features* and *reductions* programs, find Var[*buf_in*]."""
    gs, wgs = clctx.gs, clctx.wgs
    buf = cl.Image(clctx.ctx, cl.mem_flags.READ_WRITE, clctx.ifmt, (gs, gs))
    # E[X]: sum-reduce the input and divide by the pixel count
    mean = reduction.run_reduction(clctx, reductions.reduction_sum, buf_in)
    mean /= gs * gs
    # E[X^2]: the variance kernel is expected to write per-pixel squares into buf
    features.variance(clctx.queue, (gs, gs), (wgs, wgs), buf_in, buf)
    variance = reduction.run_reduction(clctx, reductions.reduction_sum, buf)
    variance /= gs * gs
    # Var[X] = E[X^2] - E[X]^2
    variance -= mean ** 2
    buf.release()
    return variance
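
These snippets rely on pyopencl (imported as cl), NumPy (np), and a companion reduction module providing run_reduction; those imports live elsewhere in features.py and are not shown. The host-side arithmetic here is just Var[X] = E[X^2] - E[X]^2 per channel, so a minimal NumPy reference sketch is easy to write for checking the kernel output (the img argument, a (gs, gs, 4) float32 array standing in for the contents of buf_in, is hypothetical):

import numpy as np

def variance_reference(img):
    """CPU reference for get_variance on a (gs, gs, 4) float32 array."""
    flat = img.reshape(-1, img.shape[2])
    mean = flat.sum(axis=0) / flat.shape[0]            # E[X] per channel
    mean_sq = (flat ** 2).sum(axis=0) / flat.shape[0]  # E[X^2] per channel
    return mean_sq - mean ** 2                         # Var[X] = E[X^2] - E[X]^2

np.var(img.reshape(-1, 4), axis=0) computes the same population variance and is a convenient cross-check.
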
Example #2
File: features.py  Project: adamgreig/iib
def get_edges(clctx, features, reductions, blurs, buf_in, summarise=True):
    """
    Using the *features* and *reductions* programs, and *blurs* program with
    sigma=2.0, find all edge pixels in *buf_in* and return the count.
    """
    gs, wgs = clctx.gs, clctx.wgs
    bufa = cl.Image(clctx.ctx, cl.mem_flags.READ_WRITE, clctx.ifmt, (gs, gs))
    bufb = cl.Image(clctx.ctx, cl.mem_flags.READ_WRITE, clctx.ifmt, (gs, gs))
    bufc = cl.Image(clctx.ctx, cl.mem_flags.READ_WRITE, clctx.ifmt, (gs, gs))

    # First separable Gaussian blur (sigma=2.0): buf_in -> bufa
    blurs.convolve_x(clctx.queue, (gs, gs), (wgs, wgs), buf_in, bufb)
    blurs.convolve_y(clctx.queue, (gs, gs), (wgs, wgs), bufb, bufa)
    # Second blur applied on top of the first: bufa -> bufb
    blurs.convolve_x(clctx.queue, (gs, gs), (wgs, wgs), bufa, bufc)
    blurs.convolve_y(clctx.queue, (gs, gs), (wgs, wgs), bufc, bufb)

    # Difference of Gaussians, then mark edge pixels and count them
    features.subtract(clctx.queue, (gs, gs), (wgs, wgs), bufb, bufa, bufc)
    features.edges(clctx.queue, (gs, gs), (wgs, wgs), bufc, bufa)
    counts = reduction.run_reduction(clctx, reductions.reduction_sum, bufa)

    if not summarise:
        edges = np.empty((gs, gs, 4), np.float32)
        cl.enqueue_copy(clctx.queue, edges, bufa,
                        origin=(0, 0), region=(gs, gs))

    bufa.release()
    bufb.release()
    bufc.release()

    if summarise:
        return counts
    else:
        return edges
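
The pipeline above is a difference-of-Gaussians edge detector: the input is blurred once, blurred again, and the two blurred images are subtracted before the edges kernel marks and counts edge pixels. A rough CPU sketch with SciPy is below; the edge criterion (a simple magnitude threshold) is an assumption, since the actual test lives in the OpenCL source of features.edges, which is not shown here.

import numpy as np
from scipy.ndimage import gaussian_filter

def edges_reference(img, sigma=2.0, threshold=0.1):
    """CPU sketch of get_edges for a (gs, gs, 4) float32 image (assumed threshold)."""
    blur1 = gaussian_filter(img, sigma=(sigma, sigma, 0))    # first blur: buf_in -> bufa
    blur2 = gaussian_filter(blur1, sigma=(sigma, sigma, 0))  # second blur: bufa -> bufb
    dog = blur2 - blur1                                      # features.subtract
    edge_mask = np.abs(dog) > threshold                      # hypothetical edge test
    return edge_mask.sum(axis=(0, 1))                        # per-channel edge count
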
Example #3
File: features.py  Project: adamgreig/iib
def get_entropy(clctx, features, reductions, buf_in):
    """Using the *features* and *reductions* programs, find H[*buf_in*]."""
    gs, wgs = clctx.gs, clctx.wgs
    buf = cl.Image(clctx.ctx, cl.mem_flags.READ_WRITE, clctx.ifmt, (gs, gs))
    features.entropy(clctx.queue, (gs, gs), (wgs, wgs), buf_in, buf)
    entropy = reduction.run_reduction(clctx, reductions.reduction_sum, buf)
    # Average over the number of wgs x wgs workgroups: the kernel appears to
    # emit one entropy value per workgroup
    entropy /= (gs * gs) / (wgs * wgs)
    buf.release()
    return entropy
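
The final division by (gs * gs) / (wgs * wgs) divides the reduced sum by the number of wgs x wgs workgroups, which suggests the entropy kernel emits one local entropy estimate per workgroup and the host averages them. A CPU sketch of that interpretation follows; the per-tile histogram entropy and the bin count are assumptions, not taken from the kernel source.

import numpy as np

def entropy_reference(img, wgs, bins=32):
    """CPU sketch: average Shannon entropy over wgs x wgs tiles (assumed model)."""
    gs, channels = img.shape[0], img.shape[2]
    tiles = gs // wgs
    h = np.zeros(channels, np.float32)
    for ty in range(tiles):
        for tx in range(tiles):
            tile = img[ty * wgs:(ty + 1) * wgs, tx * wgs:(tx + 1) * wgs]
            for c in range(channels):
                hist, _ = np.histogram(tile[..., c], bins=bins)
                p = hist[hist > 0] / hist.sum()
                h[c] += -(p * np.log2(p)).sum()
    return h / tiles ** 2  # mirrors entropy /= (gs * gs) / (wgs * wgs)
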
Example #4
File: features.py  Project: adamgreig/iib
def get_blobs(clctx, features, reductions, blurs, buf_in, summarise=True):
    """
    Using the *features* and *reductions* programs, and *blurs* program with
    sigma=sqrt(2.0), find all the blobs in *buf_in* at five scales and return
    the count at each scale.
    """
    counts = np.empty((5, 4), np.float32)
    gs, wgs = clctx.gs, clctx.wgs
    mf = cl.mem_flags
    bufa = cl.Image(clctx.ctx, mf.READ_WRITE, clctx.ifmt, (gs, gs))
    cl.enqueue_copy(clctx.queue, bufa, buf_in, src_origin=(0, 0),
                    dest_origin=(0, 0), region=(gs, gs))
    l_prev, l_curr, g_prev = None, None, bufa

    if not summarise:
        blobs = []

    for i in range(7):
        # Prepare next layer
        d = gs // (2**i)
        swg = wgs if wgs <= d else d
        g_blurr = cl.Image(clctx.ctx, mf.READ_WRITE, clctx.ifmt, (d, d))
        g_temp = cl.Image(clctx.ctx, mf.READ_WRITE, clctx.ifmt, (d, d))
        l_next = cl.Image(clctx.ctx, mf.READ_WRITE, clctx.ifmt, (d, d))
        blurs.convolve_x(clctx.queue, (d, d), (swg, swg), g_prev, g_temp)
        blurs.convolve_y(clctx.queue, (d, d), (swg, swg), g_temp, g_blurr)
        features.subtract(clctx.queue, (d, d), (swg, swg),
                          g_blurr, g_prev, l_next)

        # Find blobs in current layer
        if i >= 2:
            d = gs // (2**(i-1))
            swg = wgs if wgs <= d else d
            out = cl.Image(clctx.ctx, mf.READ_WRITE, clctx.ifmt, (d, d))
            features.blobs(clctx.queue, (d, d), (swg, swg),
                           l_prev, l_curr, l_next, out)
            rs = reductions.reduction_sum
            counts[i-2] = reduction.run_reduction(clctx, rs, out)
            if not summarise:
                blobs.append(np.empty((d, d, 4), np.float32))
                cl.enqueue_copy(clctx.queue, blobs[-1], out,
                                origin=(0, 0), region=(d, d))
            out.release()

        # Resize current layer to start the next layer
        d = gs // (2**(i+1))
        swg = wgs if wgs <= d else d
        g_resize = cl.Image(clctx.ctx, mf.READ_WRITE, clctx.ifmt, (d, d))
        features.subsample(clctx.queue, (d, d), (swg, swg), g_blurr, g_resize)

        # Cycle through buffers
        g_blurr.release()
        g_temp.release()
        g_prev.release()
        if l_prev:
            l_prev.release()
        g_prev = g_resize
        l_prev = l_curr
        l_curr = l_next

    if summarise:
        return counts
    else:
        return blobs
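
get_blobs builds a difference-of-Gaussians scale pyramid: each octave blurs the previous level with sigma=sqrt(2), subtracts to form a DoG layer, and subsamples by two for the next octave; from the third octave onwards the blobs kernel compares a layer against its neighbours above and below to keep scale-space extrema. A single-channel CPU sketch of the same structure is below; the response threshold and the linear resampling used to compare layers of different sizes are assumptions.

import numpy as np
from scipy.ndimage import gaussian_filter, zoom

def blobs_reference(img, octaves=7, sigma=np.sqrt(2.0), threshold=0.1):
    """CPU sketch of the DoG pyramid in get_blobs for one channel."""
    g_prev = img.astype(np.float32)
    layers, counts = [], []
    for i in range(octaves):
        g_blur = gaussian_filter(g_prev, sigma)   # blurs.convolve_x / convolve_y
        layers.append(g_blur - g_prev)            # features.subtract -> DoG layer
        if len(layers) >= 3:
            l_prev, l_curr, l_next = layers[-3], layers[-2], layers[-1]
            # features.blobs: keep points whose response beats both neighbouring
            # scales; resample the neighbours to l_curr's resolution to compare
            up = zoom(l_next, l_curr.shape[0] / l_next.shape[0], order=1)
            down = zoom(l_prev, l_curr.shape[0] / l_prev.shape[0], order=1)
            mask = ((np.abs(l_curr) > threshold)
                    & (np.abs(l_curr) >= np.abs(up))
                    & (np.abs(l_curr) >= np.abs(down)))
            counts.append(int(mask.sum()))
        g_prev = g_blur[::2, ::2]                 # features.subsample
    return counts
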