Code example #1
File: imgtool.py Project: dade-ai/sflow
    def _rand_crop3d(*imgs):

        with tf.name_scope(kwargs.pop('name', None), 'rand_crop', list(imgs) + [sz]):
            value = imgs[0]
            size = tf.convert_to_tensor(sz, dtype=tf.int32, name="size")
            shape = tf.shape(value)[:2]

            check = tf.Assert(tf.reduce_all(shape >= size), ["Need value.shape >= size, got", shape, size])
            shape = control_flow_ops.with_dependencies([check], shape)

            # assert same shape
            for v in imgs:
                vshape = tf.shape(v)[:2]
                check = tf.Assert(tf.reduce_all(shape.equal(vshape)),
                                  ["Need same (H,W,?) image.shape[:2] == otherimage.shape[:2], got", shape, vshape])
                shape = control_flow_ops.with_dependencies([check], shape)

            limit = shape - size + 1
            offset = tf.random_uniform(tf.shape(shape), dtype=size.dtype, maxval=size.dtype.max) % limit  # add seed
            # take last dim as-is
            # tf.assert_greater_equal(offset, 0)
            # tf.assert_greater_equal(size, 0)
            offset = offset.append(0)
            size = size.append(-1)

            return tuple(tf.slice(v, offset, size) for v in imgs)
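
Note on the offset draw above: for an image side of length H and a crop of length h there are H - h + 1 valid start positions, so the code draws a uniform integer and reduces it modulo limit = shape - size + 1 (the result is only approximately uniform, but always valid). A minimal NumPy sketch of that arithmetic, independent of sflow (names are illustrative):

import numpy as np

H, h = 10, 4                        # image side length and crop length
limit = H - h + 1                   # valid start offsets are 0 .. H - h
draws = np.random.randint(0, 2**31 - 1, size=1000)
offsets = draws % limit
assert offsets.min() >= 0 and offsets.max() <= H - h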
Code example #2
File: xdog.py Project: dade-ai/sflow
def _test_xdog_pi_epsilon():
    from sflow.sample import astronaut

    img = astronaut(expand=True)
    # img = thumbnail_room(expand=True)
    x = tf.convert_to_tensor(img)

    n = 10
    p = 20.
    sigma = 4
    eps_s = tf.linspace(0.2, 0.9, n)
    pi_s = tf.linspace(0.4, 30.0, n)
    outputs = []
    x = tf.image.rgb_to_grayscale(x)

    for i in range(n):
        for j in range(n):
            out = XDoG(x,
                       p=p,
                       sigma=sigma,
                       window=11,
                       epsilon=eps_s[i],
                       pi=pi_s[j])
            outputs.append(out)

    out = tf.concat(0, outputs)

    sess = tf.default_session()
    o = sess.run(out)

    py.plt.imshow(o, cmap='gray')
    py.plt.plot_pause()
Code example #3
def coordinate2d(t):
    """
    -1.~1. coordinate of x, y, left top = (-1., -1.)
    :param x:
    :return:
    """
    # fixme to 0.10.0
    import numpy as np
    dims = t.dims
    x = np.linspace(-1, 1, dims[2])  # width axis
    y = np.linspace(-1, 1, dims[1])  # height axis
    xv, yv = np.meshgrid(x, y)
    xbatch = np.tile(xv, [dims[0], 1, 1]).astype('float32')
    ybatch = np.tile(yv, [dims[0], 1, 1]).astype('float32')

    xgrid = tf.convert_to_tensor(xbatch[..., np.newaxis])
    ygrid = tf.convert_to_tensor(ybatch[..., np.newaxis])

    return xgrid, ygrid
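
For orientation, a plain-NumPy sketch of the grids coordinate2d returns, assuming a batch of B images of height H and width W with known static dims; xgrid and ygrid are [B, H, W, 1] float32 grids spanning -1. to 1. with the left-top pixel at (-1., -1.):

import numpy as np

B, H, W = 2, 3, 4
xv, yv = np.meshgrid(np.linspace(-1, 1, W), np.linspace(-1, 1, H))   # each [H, W]
xgrid = np.tile(xv, (B, 1, 1)).astype('float32')[..., None]          # [B, H, W, 1]
ygrid = np.tile(yv, (B, 1, 1)).astype('float32')[..., None]          # [B, H, W, 1]
assert xgrid[0, 0, 0, 0] == -1. and ygrid[0, 0, 0, 0] == -1.         # left-top corner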
Code example #4
File: imgtool.py Project: dade-ai/sflow
def pad_if_need(image, size, offsets=None):
    """
    :param image: tensor3d[H,W,C]
    :param size: (int, int) targetsize (H,W)
    :param offsets: (0,0) for None
    :return:
    """
    assert image.ndim == 3
    imshape = tf.shape(image)

    # get target shape if possible
    tshape = image.dims
    for i in (0, 1):
        if tshape[i] is not None and size[i] > tshape[i]:
            tshape[i] = size[i]

    targetshape = tf.convert_to_tensor(size).append(imshape[-1])
    need = targetshape - imshape
    # padding need
    need = tf.where(need > 0, need, tf.zeros(tf.shape(need), dtype=tf.int32))
    if offsets is None:
        offsets = [0, 0, 0]
    else:
        offsets = list(offsets)
        offsets.append(0)

    # upper padding = need // 2

    padding_first = need // 2 + tf.convert_to_tensor(offsets)
    padding_left = need - padding_first
    padding = tf.concat(0, [[padding_first], [padding_left]]).T

    out = tf.pad(image, padding, 'CONSTANT')
    # rshape = tf.maximum(imshape, targetshape)

    # if known shape.. set
    out.set_shape(tshape)

    return out
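
The padding amounts above split the shortfall so that (roughly) half goes before and half after each spatial axis, shifted by the optional offsets. A NumPy sketch of that arithmetic for a hypothetical 100x80x3 image padded up to (120, 120):

import numpy as np

imshape = np.array([100, 80, 3])
target = np.array([120, 120, 3])              # (H, W) target plus unchanged channels
need = np.maximum(target - imshape, 0)        # [20, 40, 0]
first = need // 2                             # padding before each axis: [10, 20, 0]
last = need - first                           # padding after each axis:  [10, 20, 0]
padded = np.pad(np.zeros(imshape), list(zip(first, last)), 'constant')
assert padded.shape == (120, 120, 3)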
Code example #5
def dropout(x, keep_prob=0.5, is_training=None, noise_shape=None, seed=None):

    if keep_prob == 1.0:
        return x

    def _dropout():
        return tf.nn.dropout(x, keep_prob, noise_shape, seed)

    if is_training is None:
        is_training = x.graph.is_training
    else:
        is_training = tf.convert_to_tensor(is_training)
    return tf.cond(is_training, _dropout, lambda: x)
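
A hypothetical usage sketch: graph.is_training is an sflow convenience flag, but the same toggle can be driven by an explicit boolean placeholder fed at run time (plain TensorFlow 1.x API; h is an illustrative activation tensor):

h = tf.placeholder(tf.float32, [None, 128])            # hypothetical activations
is_training = tf.placeholder(tf.bool, name='is_training')
h = dropout(h, keep_prob=0.5, is_training=is_training)
# feed {is_training: True} during training and False at evaluation,
# so tf.cond takes the dropout branch only while training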
Code example #6
File: xdog.py Project: dade-ai/sflow
def XDoG(x, p, pi=None, epsilon=None, sigma=1., k=1.6, window=3, name=None):
    """
    see : http://www.kyprianidis.com/p/cag2012/winnemoeller-cag2012.pdf

    example style:

        woodcut :  extreme edge emphasis settings to produce
        shape abstraction, and long, coherent carving-cuts (ϕ ≫ 0.01, σc ≈ 5 and p ≈ 100)

    Appendix A lists complete settings for many of our results,
    demonstrating the range over which we have found it useful
    to vary the XDoG parameters. We have found that choosing
    ε close to the midtone greyvalue of the image and p near 20,
    tends to lead to interesting stylizations;  though some specialized
    styles require much larger p values. The soft thresholding
    steepness parameter ϕ varies more widely. Because it controls
    the slope of the falloff, when ϕ is close to zero it is very sensitive
    to small changes, while the parameter becomes much less
    sensitive to small changes as it increases.

    :param x: image [bhwc]
    :param p: XDoG parameter p > 0.
    :param pi: threshold_ramp parameter, slope
    :param epsilon: threshold_ramp parameter. (0, 1), thresholding value
    :param sigma: DoG parameter, sigma > 0.
    :param k: DoG parameter. k * sigma
    :param window: window size for gaussian filter
    :param name:
    :return:
    """

    # An eXtended difference-of-Gaussians
    # Reparameterization of the XDoG equation(7)
    # S(x; sigma, k, p) = G(x; sigma) + p*D(x; sigma, k)
    #                   = (1+p) * G(x; sigma) - p * G(x; k*sigma)

    k = tf.convert_to_tensor(k, dtype=tf.float32)
    g1 = gaussian_blur(x, window, sigma, padding='SAME')
    g2 = gaussian_blur(x, window, k * sigma, padding='SAME')

    out = (1. + p) * g1 - p * g2
    # thresholding requires both epsilon and pi
    if epsilon is None and pi is None:
        return out
    elif epsilon is not None and pi is not None:
        return threshold_ramp(out, pi, epsilon, name=name or 'XDoG')
    else:
        raise ValueError(
            'XDoG needs both (for thresholding) or neither of (epsilon, pi), but got epsilon {}, '
            'pi {}'.format(epsilon, pi))
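
The comment block in XDoG states the reparameterization S(x; sigma, k, p) = G(x; sigma) + p*(G(x; sigma) - G(x; k*sigma)) = (1 + p)*G(x; sigma) - p*G(x; k*sigma). A minimal sketch, independent of sflow and using scipy's 1-D Gaussian filter as a stand-in for gaussian_blur, that checks the two forms agree numerically:

import numpy as np
from scipy.ndimage import gaussian_filter1d

x = np.random.rand(256).astype('float32')      # toy 1-D signal
sigma, k, p = 1.0, 1.6, 20.0

g1 = gaussian_filter1d(x, sigma)                # G(x; sigma)
g2 = gaussian_filter1d(x, k * sigma)            # G(x; k * sigma)

lhs = g1 + p * (g1 - g2)                        # G + p * DoG
rhs = (1. + p) * g1 - p * g2                    # form computed in XDoG() above

assert np.allclose(lhs, rhs, atol=1e-4)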
Code example #7
File: imgtool.py Project: dade-ai/sflow
    def _crop_center_one(imgs, name=None):
        size = tf.convert_to_tensor(sz, dtype=tf.int32, name="size")
        hw = tf.shape(imgs)[-3:-1]

        # no gpu support
        # check = tf.Assert(tf.reduce_all(hw >= size),
        #                   ['Need crop size less than tensor tensor.shape[-3:-1] >= cropsize, got', hw, size])
        # hw = control_flow_ops.with_dependencies([check], hw)

        offset = (hw - size) // 2
        if imgs.ndim == 3:
            offset = tf.concat(0, [offset, [0]])
            size = tf.concat(0, [size, [-1]])
        if imgs.ndim == 4:
            offset = tf.concat(0, [[0], offset, [0]])
            size = tf.concat(0, [[-1], size, [-1]])

        return tf.slice(imgs, offset, size, name=name)
Code example #8
File: imgtool.py Project: dade-ai/sflow
def imread(fpath, size=None, dtype='float32'):
    # read image as tensor
    import sflow.python.fileutil as py
    img = py.imread(fpath, size=size, expand=True, dtype=dtype)

    # from skimage import io, transform
    # img = io.imread(fpath)
    # if size is not None:
    #     sz = list(img.shape)
    #     sz[:len(size)] = size
    #     img = transform.resize(img, sz, preserve_range=True)
    # img = img.astype('float32') / 255.
    # img = tf.convert_to_tensor(img)
    # img = img.expand_dims(0)
    # if img.ndim == 3:
    #     img = img.expand_dims(-1)
    # return img

    return tf.convert_to_tensor(img)
Code example #9
    def _wraped(expand=False, size=None, dtype='float32', tensor=False):
        from skimage import io, transform

        img = f()
        if size is not None:
            sz = list(img.shape)
            sz[:len(size)] = size
            img = transform.resize(img, sz, preserve_range=True)
        if dtype == 'int8':
            pass
        elif dtype.startswith('float'):
            img = img.astype(dtype) / 255.
        if expand:
            img = np.expand_dims(img, 0)
            if img.ndim == 3:
                img = np.expand_dims(img, -1)
        if tensor:
            img = tf.convert_to_tensor(img)
        return img
Code example #10
File: imgtool.py Project: dade-ai/sflow
    def _rand_crop_offsets(*imgs):
        with tf.name_scope(None, 'rand_crop', list(imgs) + [sz]):
            value = imgs[0]
            size = tf.convert_to_tensor(sz, dtype=tf.int32, name="size")
            shape = tf.shape(value)[1:3]  # HW of BHWC

            check = tf.Assert(tf.reduce_all(shape >= size), ["Need value.shape >= size, got", shape, size])
            shape = control_flow_ops.with_dependencies([check], shape)

            # assert same shape
            for v in imgs:
                vshape = tf.shape(v)[1:3]  # assert v.ndim == 4
                check = tf.Assert(tf.reduce_all(shape.equal(vshape)),
                                  ["Need same (H,W,?) image.shape[1:3] == otherimage.shape[1:3], got", shape, vshape])
                shape = control_flow_ops.with_dependencies([check], shape)

            limit = shape - size + 1
            if value.dims[0] is None:
                batchshape = tf.shape(value)[:1].append(2)
            else:
                batchshape = (value.dims[0], 2)

            offsets = tf.random_uniform(batchshape, dtype=size.dtype, maxval=size.dtype.max) % limit  # add seed
            # offsets = tf.random_uniform(batchshape, maxval=limit, dtype=tf.int32)

            # sz = size
            size = size.append(-1)

            def _3d_crop(args):
                values, offset = args
                offset = offset.append(0)
                # outs = [tf.slice(img, offset, size) for img in values]
                outs = []
                for img in values:
                    out = tf.slice(img, offset, size)
                    out.set_shape(list(sz) + img.dims[-1:])
                    outs.append(out)
                return outs

            return tf.map_fn(_3d_crop, [imgs, offsets], dtype=[v.dtype for v in imgs]), offsets
Code example #11
File: xdog.py Project: dade-ai/sflow
def _test_xdog_p_sigmas():
    from sflow.sample import astronaut

    img = astronaut(expand=True)
    x = tf.convert_to_tensor(img)

    n = 5
    sigmas = tf.linspace(1.0, 10.0, n)
    p_s = tf.linspace(0.1, 4.0, n)
    outputs = []
    for i in range(n):
        for j in range(n):
            out = XDoG(x, p=p_s[i], sigma=sigmas[j], window=11)
            out = tf.image.rgb_to_grayscale(out)
            out = tf.where(out > 0.6, tf.ones_like(out), tf.zeros_like(out))
            outputs.append(out)

    outputs = tf.concat(0, outputs)

    sess = tf.default_session()
    out = sess.run(outputs)

    py.plt.imshow(out, cmap='gray')
    py.plt.plot_pause()
Code example #12
File: improcess.py Project: dade-ai/sflow
def _standarize_filter(f, name=None):
    f = tf.convert_to_tensor(f, dtype=tf.float32)
    if f.ndim == 2:
        f = f.expand_dims(-1).expand_dims(-1, name=name)
    return f
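
For reference, the equivalent shape change in plain NumPy for a hypothetical 3x3 averaging filter: a 2-D kernel [kh, kw] becomes [kh, kw, 1, 1], the [filter_height, filter_width, in_channels, out_channels] layout that tf.nn.conv2d expects for a single-channel, single-output filter:

import numpy as np

f = np.ones((3, 3), dtype='float32') / 9.     # 2-D box filter, shape [3, 3]
f = f[:, :, None, None]                       # -> [3, 3, 1, 1] (kh, kw, in, out)
assert f.shape == (3, 3, 1, 1)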
Code example #13
File: transforms.py Project: dade-ai/sflow
def sampling_xy_3r(img, xys, outsize=None, oob=None):
    """
    differentiable image sampling (with interpolation)
    :param img: source image [HWC]
    :param xys: source coord [2, H'*W'] if outsize given
    :param outsize: [H',W'] or None, xys must has rank3
    :return: [B,H',W',C]
    """
    assert img.ndim == 3

    oobv = oob
    if oobv is None:
        # oobv = tf.zeros(shape=(img.dims[-1]), dtype=tf.float32)  # [0., 0., 0.]
        oobv = 0.
        # oobv = [0., 0., 0.]
    oobv = tf.convert_to_tensor(oobv)

    if outsize is None:
        outsize = tf.shape(xys)[1:]
        xys = xys.flat2d()

    H, W, C = img.shapes
    WH = tf.stack([W, H]).to_float().reshape((2, 1))

    # XYf = (xys + 1.) * WH * 0.5  # scale to HW coord ( + 1 for start from 0)
    XYf = (xys + 0.5) * WH  # scale to HW (pixel) coordinates
    XYS = tf.ceil(XYf)  # left top weight

    # prepare weights
    w00 = XYS - XYf  # [2, p]
    w11 = 1. - w00  # [2, p]

    # get near 4 pixels per pixel
    XYS = XYS.to_int32()  # [2, p]  # todo check xy order
    XYs = XYS - 1
    Xs = tf.stack([XYs[0], XYS[0]])
    Ys = tf.stack([XYs[1], XYS[1]])

    # get mask of outof bound
    # leave option for filling value
    Xi = Xs.clip_by_value(0, W - 1)
    Yi = Ys.clip_by_value(0, H - 1)

    inb = tf.logical_and(Xi.equal(Xs), Yi.equal(Ys))  # [2, p]
    inb = tf.reduce_any(inb, axis=0, keepdims=True)  # any neighbour in bounds?  [1, p]
    # inb = inb.expand_dims(2).to_float()  # [1, p]
    inb = inb.reshape((-1, 1)).to_float()  # [p, 1] 1 for channel

    # get 4 pixels  [p, C]
    p00 = getpixel(img, tf.stack([Yi[0], Xi[0]]).T)
    p01 = getpixel(img, tf.stack([Yi[0], Xi[1]]).T)
    p10 = getpixel(img, tf.stack([Yi[1], Xi[0]]).T)
    p11 = getpixel(img, tf.stack([Yi[1], Xi[1]]).T)

    # stacked nearest : [4, p, C]
    near4 = tf.stack([p00, p01, p10, p11], axis=0)

    # XYw : 4 near point weights [4, pixel]
    w4 = tf.stack([
        w00[1] * w00[0],  # left top
        w00[1] * w11[0],  # right top
        w11[1] * w00[0],  # left bottom
        w11[1] * w11[0]
    ])  # right bottom
    # weighted sum of 4 nearest pixels broadcasting
    w4 = w4.reshape((4, -1, 1))
    # interpolated = tf.sum(w4 * near4.to_float(), axis=1)  # [p, C]
    interpolated = tf.sum(w4 * near4.to_float(), axis=0)  # [p, C]

    # assign oob value
    # fill oob by broadcasting
    oobv = oobv.reshape((1, -1))  # [p, C]
    interpolated = interpolated * inb + oobv * (1. - inb)

    output = interpolated.reshape((outsize[0], outsize[1], C))
    # reshape [p, C] => [H', W', C]

    return output
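
At its core sampling_xy_3r is bilinear interpolation: each output pixel is a weighted sum of its four nearest source pixels, with weights taken from the fractional part of the sampling coordinate. A small NumPy sketch of that weighting for a single point (illustrative only; it does not reproduce the exact coordinate convention used above):

import numpy as np

img = np.arange(16, dtype='float32').reshape(4, 4)    # toy [H, W] image
xf, yf = 1.3, 2.7                                      # fractional sample position

x0, y0 = int(np.floor(xf)), int(np.floor(yf))          # left / top neighbours
x1, y1 = x0 + 1, y0 + 1                                 # right / bottom neighbours
wx1, wy1 = xf - x0, yf - y0                             # weights toward x1 / y1
wx0, wy0 = 1. - wx1, 1. - wy1                           # weights toward x0 / y0

value = (wy0 * (wx0 * img[y0, x0] + wx1 * img[y0, x1]) +
         wy1 * (wx0 * img[y1, x0] + wx1 * img[y1, x1]))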
Code example #14
File: transforms.py Project: dade-ai/sflow
        return tf.map_fn(lambda x: _getpixel_3r(x, ind),
                         img,
                         name=name or 'getpixel')
    else:
        raise ValueError("need 2d support in getpixel?")


# endregion

# todo add more example

if __name__ == '__main__':
    from skimage.data import coffee
    import matplotlib.pyplot as plt

    img = coffee()
    img = tf.convert_to_tensor(img / 255., dtype=tf.float32)
    img = tf.expand_dims(img, 0)

    theta = tf.convert_to_tensor([[0.5, 0., 0.], [0., 0.5, 0.]])
    t = transform(img, theta)

    plt.subplot(1, 2, 1)
    plt.imshow(img[0].eval())

    plt.subplot(1, 2, 2)
    plt.imshow(t[0].eval())

    plt.show()
    print('done')