Example 1
def get_s(J, gammaS=1):

    h, w = J.shape
    line_len_double = float(min(h, w)) / line_len_divisor

    line_len = int(line_len_double)
    line_len += line_len % 2

    half_line_len = line_len / 2

    # compute the image gradient 'Imag'
    dJ = im2double(J)
    Ix = np.column_stack((abs(dJ[:, 0:-1] - dJ[:, 1:]), np.zeros((h, 1))))
    Iy = np.row_stack((abs(dJ[0:-1, :] - dJ[1:, :]), np.zeros((1, w))))
    # eq.1
    Imag = np.sqrt(Ix * Ix + Iy * Iy)

    # Image.fromarray((1 - Imag) * 255).show()

    # create the 8 directional line segments L

    L = np.zeros((line_len, line_len, 8))
    for n in range(8):
        if n == 0 or n == 1 or n == 2 or n == 7:
            for x in range(0, line_len):
                y = round(
                    ((x + 1) - half_line_len) * math.tan(math.pi / 8 * n))
                y = half_line_len - y
                if 0 < y <= line_len:
                    L[int(y - 1), x, n] = 1
                if n < 7:
                    L[:, :, n + 4] = rot90c(L[:, :, n])
    L[:, :, 3] = rot90(L[:, :, 7])

    G = np.zeros((J.shape[0], J.shape[1], 8))
    for n in range(8):
        G[:, :, n] = signal.convolve2d(Imag, L[:, :, n], "same")  # eq.2

    Gindex = G.argmax(axis=2)
    # C is map set
    C = np.zeros((J.shape[0], J.shape[1], 8))
    for n in range(8):

        C[:, :, n] = Imag * (1 * (Gindex == n))

    # line shaping
    # generate lines at each pixel
    Spn = np.zeros((J.shape[0], J.shape[1], 8))
    for n in range(8):
        Spn[:, :, n] = signal.convolve2d(C[:, :, n], L[:, :, n], "same")

    Sp = Spn.sum(axis=2)
    Sp = (Sp - Sp[:].min()) / (Sp[:].max() - Sp[:].min())
    S = (1 - Sp)**gammaS

    img = Image.fromarray(S * 255)
    # img.show()

    return S
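A minimal usage sketch for this snippet, assuming get_s is importable from the module it lives in (the module name pencil_sketch below is hypothetical) together with the helpers it relies on (im2double, rot90, rot90c) and the module-level constant line_len_divisor:

import numpy as np
from PIL import Image

from pencil_sketch import get_s  # hypothetical module name for the snippet above

# load an image, convert it to grayscale, and hand the matrix to get_s
gray = np.array(Image.open("input.jpg").convert("L"))
S = get_s(gray, gammaS=1)  # values in [0, 1]; a larger gammaS gives thicker lines

Image.fromarray((S * 255).astype(np.uint8)).save("stroke.png")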
Example 2
def read_imagefile_into_rgb(filepath, use_uint8=False):
    image = imread(str(filepath), mode='RGB')

    if image.ndim > 3 and image.shape[3] > 1:
        image = image[:, :, :, 1]

    if use_uint8:
        image = im2uint8(image)
    else:
        image = im2double(image)

    assert image.shape[2] == 3
    return image
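A short usage sketch, assuming the function is importable from its module (the name image_io below is hypothetical) together with the imread/im2double/im2uint8 helpers it calls:

from pathlib import Path

from image_io import read_imagefile_into_rgb  # hypothetical module name

# float image in [0, 1], shape (H, W, 3)
rgb = read_imagefile_into_rgb(Path("photo.jpg"))

# same image as uint8 in [0, 255]
rgb_u8 = read_imagefile_into_rgb(Path("photo.jpg"), use_uint8=True)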
Example 3
    def deepen(self, image):
        """Given a depth model and an image, produce an corresponding depth-image."""
        if isinstance(image, Path):
            print('Processing image %s\n' % (str(image)))
            image = read_imagefile_into_rgb(image)

        if self.max_img_edge > 0:
            image = immaxedge(image, self.max_img_edge)

        superpixels = SuperPixels(image, self.avg_sp_size)
        superpixels.find_adjacencies()

        pairwise = gen_pairwise_feature_diffs(superpixels)
        sp_depths = self.evaluate(image, superpixels, pairwise)
        return my_fill_depth_colorization(
            im2double(image), sp_depths.astype(float),
            gen_sp_centroid(superpixels)).astype(float)
Example 4
def get_tone(J, type, gammaI=1):
    '''
    tone rendering
    Args:
        J:      grayscale image (matrix)
        type:   image type
        gammaI: darkness control parameter
    Returns:
        T: tone map of the input image, as a matrix
    '''
    # histogram matching
    Jadjusted = match(J, type=type)**gammaI
    # texture: the pencil-stroke texture pattern
    texture = Image.open(texture_file_name)
    texture = np.array(texture.convert("L"))
    # texture = np.array(texture)
    texture = texture[99:texture.shape[0] - 100, 99:texture.shape[1] - 100]

    ratio = texture_resize_ratio * min(J.shape[0], J.shape[1]) / float(1024)
    texture_resize = interpolation.zoom(texture, (ratio, ratio))
    texture = im2double(texture_resize)
    htexture = horizontal_stitch(texture, J.shape[1])
    Jtexture = vertical_stitch(htexture, J.shape[0])

    size = J.shape[0] * J.shape[1]

    nzmax = 2 * (size - 1)
    i = np.zeros((nzmax, 1))
    j = np.zeros((nzmax, 1))
    s = np.zeros((nzmax, 1))
    for m in range(1, nzmax + 1):
        i[m - 1] = round(math.ceil((m + 0.1) / 2)) - 1
        j[m - 1] = round(math.ceil((m - 0.1) / 2)) - 1
        s[m - 1] = -2 * (m % 2) + 1
    dx = csr_matrix((s.T[0], (i.T[0], j.T[0])), shape=(size, size))

    nzmax = 2 * (size - J.shape[1])
    i = np.zeros((nzmax, 1))
    j = np.zeros((nzmax, 1))
    s = np.zeros((nzmax, 1))
    for m in range(1, nzmax + 1):
        i[m - 1, :] = round(math.ceil((m - 1 + 0.1) / 2) +
                            J.shape[1] * (m % 2)) - 1
        j[m - 1, :] = math.ceil((m - 0.1) / 2) - 1
        s[m - 1, :] = -2 * (m % 2) + 1
    dy = csr_matrix((s.T[0], (i.T[0], j.T[0])), shape=(size, size))

    Jtexture1d = np.log(np.reshape(Jtexture.T,
                        (1, Jtexture.size), order="f") + 0.01)
    Jtsparse = spdiags(Jtexture1d, 0, size, size)
    Jadjusted1d = np.log(np.reshape(Jadjusted.T,
                         (1, Jadjusted.size), order="f").T + 0.01)

    nat = Jtsparse.T.dot(Jadjusted1d)  # lnJ(x)
    a = np.dot(Jtsparse.T, Jtsparse)
    b = dx.T.dot(dx)
    c = dy.T.dot(dy)
    mat = a + Lambda * (b + c)  # lnH(x)

    beta1d = spsolve(mat, nat)
    beta = np.reshape(beta1d, (J.shape[0], J.shape[1]), order="c")
    # simulate repeated back-and-forth pencil strokes: the texture pattern is applied beta times
    T = Jtexture**beta
    T = (T - T.min()) / (T.max() - T.min())

    img = Image.fromarray(T * 255)

    return T
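For reference, the sparse system assembled above can be written compactly. This is a reconstruction from the code, not a quotation of the paper: Jtsparse holds ln(Jtexture) on its diagonal, dx and dy are forward-difference operators, Lambda is the module-level smoothness weight, and in the notation below H stands for Jtexture and J for Jadjusted.

\beta^{*} = \arg\min_{\beta}\;
    \lVert \beta\,\ln H - \ln J \rVert_2^2
    + \lambda \left( \lVert D_x \beta \rVert_2^2 + \lVert D_y \beta \rVert_2^2 \right)
\;\Longleftrightarrow\;
\left( \operatorname{diag}(\ln H)^{\top}\operatorname{diag}(\ln H)
       + \lambda \left( D_x^{\top} D_x + D_y^{\top} D_y \right) \right)\beta
    = \operatorname{diag}(\ln H)\,\ln J,
\qquad
T = H^{\beta}\ \text{(then normalized to } [0,1]\text{)}.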
Example 5
def get_stroke(J, gammaS=1):
    '''
    Compute the stroke structure.
    Args:
        J:      grayscale image (matrix)
        gammaS: line-thickness control parameter
    Returns:
        S: stroke structure of the input image, as a matrix
    '''
    h, w = J.shape
    line_len_double = float(min(h, w)) / line_len_divisor

    line_len = round(line_len_double)
    line_len += line_len % 2

    half_line_len = line_len / 2

    # compute the image gradient

    dJ = im2double(J)
    Ix = np.column_stack((abs(dJ[:, 0:-1] - dJ[:, 1:]), np.zeros((h, 1))))
    Iy = np.row_stack((abs(dJ[0:-1, :] - dJ[1:, :]), np.zeros((1, w))))
    Imag = np.sqrt(Ix * Ix + Iy * Iy)

    # split into 8 directions; L[:, :, index] is the line segment for direction index+1, used as a convolution kernel
    L = np.zeros((line_len, line_len, 8))
    for n in range(8):
        if n == 0 or n == 1 or n == 2 or n == 7:
            for x in range(0, line_len):
                y = round(
                    ((x + 1) - half_line_len) * math.tan(math.pi / 8 * n))
                y = half_line_len - y
                if 0 < y <= line_len:
                    L[round(y - 1), x, n] = 1
                if n < 7:
                    L[:, :, n + 4] = rot90c(L[:, :, n])
    L[:, :, 3] = rot90(L[:, :, 7])

    G = np.zeros((J.shape[0], J.shape[1], 8))
    for n in range(8):
        # response of each directional kernel; the maximum picks the direction
        G[:, :, n] = signal.convolve2d(Imag, L[:, :, n], "same")

    Gindex = G.argmax(axis=2)

    C = np.zeros((J.shape[0], J.shape[1], 8))
    for n in range(8):
        C[:, :, n] = Imag * (1 * (Gindex == n))

    # convolve the map set C with the directional kernels L to generate lines at each pixel
    Spn = np.zeros((J.shape[0], J.shape[1], 8))
    for n in range(8):
        Spn[:, :, n] = signal.convolve2d(C[:, :, n], L[:, :, n], "same")
    # sum over the eight directions and normalize
    Sp = Spn.sum(axis=2)
    Sp = (Sp - Sp[:].min()) / (Sp[:].max() - Sp[:].min())
    S = (1 - Sp)**gammaS

    img = Image.fromarray(S * 255)

    return S
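The classification and line-shaping core of this function (the eq.2/eq.3 steps referenced in the other examples) can be exercised in isolation. A minimal, self-contained sketch with a toy gradient map and two stand-in kernels; it only illustrates the mechanics, not the exact kernels the function builds:

import numpy as np
from scipy import signal

rng = np.random.default_rng(0)
Imag = rng.random((32, 32))           # toy gradient-magnitude map
line_len = 8
L = np.zeros((line_len, line_len, 8))
L[line_len // 2, :, 0] = 1            # stand-in horizontal kernel
L[:, line_len // 2, 1] = 1            # stand-in vertical kernel
# the remaining 6 kernels are left empty in this toy example

# eq.2: response of every direction at every pixel
G = np.stack([signal.convolve2d(Imag, L[:, :, n], "same") for n in range(8)], axis=2)

# eq.3: each pixel keeps its gradient only in its strongest direction (map set C)
Gindex = G.argmax(axis=2)
C = np.stack([Imag * (Gindex == n) for n in range(8)], axis=2)

# line shaping: convolving C with the same kernels links pixels into strokes
Spn = np.stack([signal.convolve2d(C[:, :, n], L[:, :, n], "same") for n in range(8)], axis=2)
Sp = Spn.sum(axis=2)
S = 1 - (Sp - Sp.min()) / (Sp.max() - Sp.min())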
Example 6
def get_t(J, type, gammaI=1):

    Jadjusted = natural_histogram_matching(J, type=type)**gammaI
    # Jadjusted = natural_histogram_matching(J, type=type)

    texture = Image.open(texture_file_name)
    texture = np.array(texture.convert("L"))
    # texture = np.array(texture)
    texture = texture[99:texture.shape[0] - 100, 99:texture.shape[1] - 100]

    ratio = texture_resize_ratio * min(J.shape[0], J.shape[1]) / float(1024)
    texture_resize = interpolation.zoom(texture, (ratio, ratio))
    texture = im2double(texture_resize)
    htexture = hstitch(texture, J.shape[1])
    Jtexture = vstitch(htexture, J.shape[0])

    size = J.shape[0] * J.shape[1]

    nzmax = 2 * (size - 1)
    i = np.zeros((nzmax, 1))
    j = np.zeros((nzmax, 1))
    s = np.zeros((nzmax, 1))
    for m in range(1, nzmax + 1):
        i[m - 1] = int(math.ceil((m + 0.1) / 2)) - 1
        j[m - 1] = int(math.ceil((m - 0.1) / 2)) - 1
        s[m - 1] = -2 * (m % 2) + 1
    dx = csr_matrix((s.T[0], (i.T[0], j.T[0])), shape=(size, size))

    nzmax = 2 * (size - J.shape[1])
    i = np.zeros((nzmax, 1))
    j = np.zeros((nzmax, 1))
    s = np.zeros((nzmax, 1))
    for m in range(1, nzmax + 1):
        i[m - 1, :] = int(math.ceil((m - 1 + 0.1) / 2) + J.shape[1] *
                          (m % 2)) - 1
        j[m - 1, :] = math.ceil((m - 0.1) / 2) - 1
        s[m - 1, :] = -2 * (m % 2) + 1
    dy = csr_matrix((s.T[0], (i.T[0], j.T[0])), shape=(size, size))

    Jtexture1d = np.log(
        np.reshape(Jtexture.T, (1, Jtexture.size), order="f") + 0.01)
    Jtsparse = spdiags(Jtexture1d, 0, size, size)
    Jadjusted1d = np.log(
        np.reshape(Jadjusted.T, (1, Jadjusted.size), order="f").T + 0.01)

    nat = Jtsparse.T.dot(Jadjusted1d)  # lnJ(x)
    a = np.dot(Jtsparse.T, Jtsparse)
    b = dx.T.dot(dx)
    c = dy.T.dot(dy)
    mat = a + Lambda * (b + c)  # lnH(x)

    # x = spsolve(a,b) <--> a*x = b
    # lnH(x) * beta(x) = lnJ(x) --> beta(x) = spsolve(lnH(x), lnJ(x))

    beta1d = spsolve(mat, nat)  # eq.8
    beta = np.reshape(beta1d, (J.shape[0], J.shape[1]), order="c")

    T = Jtexture**beta  # eq.9
    T = (T - T.min()) / (T.max() - T.min())

    img = Image.fromarray(T * 255)
    # img.show()

    return T
Example 7
def get_s(J, gammaS=1):
    '''
    Stroke Structure Generation
    Stroke drawing aims at expressing general structures of the scene.

    1. classification:
        First compute the image gradient in the x and y directions, then take
        the square root of the sum of squares at each position.

        Because of noise, the contour obtained directly from the raw gradient
        is not good enough.

        More importantly, the method tries to mimic the way humans draw: a
        long straight line cannot be drawn in one stroke, so it is drawn in
        segments, and the paper wants to reproduce this effect.

        It does so by estimating a direction for every pixel and generating
        line segments along it.

        Eight directions are used, 45 degrees apart; the eight direction
        vectors double as convolution kernels.

        Each pixel is assigned the direction with the largest of the eight
        convolution responses, giving a map set C in which the chosen
        direction is marked with 1 and the rest are 0.

    2. line shaping:
        The step that generates the contour lines.

        Convolving the map set C with the direction vectors aggregates the
        pixels that lie along the same direction.

        It also links edge pixels of the original gradient map into line
        segments.

    :param J:   matrix of the image converted to grayscale
    :gammaS:    control parameter; the larger the value, the thicker the lines
    :return:    stroke structure of the image, the contour map S
    '''

    h, w = J.shape
    # convolution kernel size
    line_len_double = float(min(h, w)) / line_len_divisor

    # make it even
    line_len = int(line_len_double)
    line_len += line_len % 2

    half_line_len = line_len / 2

    # compute the image gradient 'Imag'
    dJ = im2double(J)
    Ix = np.column_stack((abs(dJ[:, 0:-1] - dJ[:, 1:]), np.zeros((h, 1))))
    Iy = np.row_stack((abs(dJ[0:-1, :] - dJ[1:, :]), np.zeros((1, w))))
    # eq.1
    Imag = np.sqrt(Ix*Ix + Iy*Iy)

    # uncommenting the line below shows the contour produced by the raw gradient alone, which is easily corrupted by noise
    # Image.fromarray((1 - Imag) * 255).show()

    # create the 8 directional line segments L
    # L[:, :, index] is the line segment for direction index+1,
    # used as a convolution kernel.
    # The kernels are symmetric across directions, so only half of them need to be assigned; the rest are obtained by rotation.
    L = np.zeros((line_len, line_len, 8))
    for n in range(8):
        if n == 0 or n == 1 or n == 2 or n == 7:
            for x in range(0, line_len):
                y = round(((x+1) - half_line_len) * math.tan(math.pi/8*n))
                y = half_line_len - y
                if 0 < y <= line_len:
                    L[int(y-1), x, n] = 1
                if n < 7:
                    L[:, :, n+4] = rot90c(L[:, :, n])
    L[:, :, 3] = rot90(L[:, :, 7])


    # build the response matrix G
    G = np.zeros((J.shape[0], J.shape[1], 8))
    for n in range(8):
        # convolve with the directional kernel
        G[:, :, n] = signal.convolve2d(Imag, L[:, :, n], "same")    # eq.2

    Gindex = G.argmax(axis=2)   # index of the maximal element along the direction axis
    # C is map set
    C = np.zeros((J.shape[0], J.shape[1], 8))
    for n in range(8):
        # for each of the eight directions, keep the pixels whose maximal response lies in that direction
        # eq.3 -- the paper's formula differs slightly from its explanation; the maximal-response direction is selected here
        C[:, :, n] = Imag * (1 * (Gindex == n))

    # line shaping
    # generate lines at each pixel
    Spn = np.zeros((J.shape[0], J.shape[1], 8))
    for n in range(8):
        Spn[:, :, n] = signal.convolve2d(C[:, :, n], L[:, :, n], "same")

    # sum over the eight directions and normalize
    Sp = Spn.sum(axis=2)
    Sp = (Sp - Sp[:].min()) / (Sp[:].max() - Sp[:].min())
    S = (1 - Sp) ** gammaS

    img = Image.fromarray(S * 255)
    # img.show()

    return S
Example 8
def get_t(J, type, gammaI=1):
    '''
    Tone rendering:
    Tone drawing focuses more on shapes, shadow, and shading than on the use of lines.

    The histogram of a pencil drawing follows a certain pattern, because it is
    only the combination of pencil and white paper.
    It can be split into three regions: 1. bright, 2. dark, 3. in between,
    and each region has its own model:

    the bright part is modelled with a Laplace distribution,
    the middle part with a uniform distribution,
    the dark part with a Gaussian distribution.

    The authors then list the corresponding parameters learned from a
    collection of pencil sketches.

    Tone and colour in a pencil drawing are produced by repeatedly drawing
    over the same area with the pencil.

    1. histogram matching:
        compute the image histogram using the three distributions and match it
        to the histogram of a normal image
    2. texture rendering:
        compute beta, the number of repeated pencil passes needed at each pixel

    :param J:       matrix of the image converted to grayscale
    :param type:    image type
    :param gammaI:  control parameter; the larger the value, the darker the result
    :return:        tone-rendered image matrix T
    '''

    # histogram matching
    Jadjusted = natural_histogram_matching(J, type=type) ** gammaI
    # Jadjusted = natural_histogram_matching(J, type=type)

    # resize the pencil texture to the size of the image being processed
    texture = Image.open(texture_file_name)
    texture = np.array(texture.convert("L"))
    # texture = np.array(texture)
    texture = texture[99: texture.shape[0]-100, 99: texture.shape[1]-100]

    ratio = texture_resize_ratio * min(J.shape[0], J.shape[1]) / float(1024)
    texture_resize = interpolation.zoom(texture, (ratio, ratio))
    texture = im2double(texture_resize)
    htexture = hstitch(texture, J.shape[1])
    Jtexture = vstitch(htexture, J.shape[0])

    size = J.shape[0] * J.shape[1]

    nzmax = 2 * (size-1)
    i = np.zeros((nzmax, 1))
    j = np.zeros((nzmax, 1))
    s = np.zeros((nzmax, 1))
    for m in range(1, nzmax+1):
        i[m-1] = int(math.ceil((m+0.1) / 2)) - 1
        j[m-1] = int(math.ceil((m-0.1) / 2)) - 1
        s[m-1] = -2 * (m % 2) + 1
    dx = csr_matrix((s.T[0], (i.T[0], j.T[0])), shape=(size, size))

    nzmax = 2 * (size - J.shape[1])
    i = np.zeros((nzmax, 1))
    j = np.zeros((nzmax, 1))
    s = np.zeros((nzmax, 1))
    for m in range(1, nzmax+1):
        i[m-1, :] = int(math.ceil((m-1+0.1)/2) + J.shape[1] * (m % 2)) - 1
        j[m-1, :] = math.ceil((m-0.1)/2) - 1
        s[m-1, :] = -2 * (m % 2) + 1
    dy = csr_matrix((s.T[0], (i.T[0], j.T[0])), shape=(size, size))

    # the +0.01 avoids taking the log of 0; its effect on normal values is negligible
    Jtexture1d = np.log(np.reshape(Jtexture.T, (1, Jtexture.size), order="f") + 0.01)
    Jtsparse = spdiags(Jtexture1d, 0, size, size)
    Jadjusted1d = np.log(np.reshape(Jadjusted.T, (1, Jadjusted.size), order="f").T + 0.01)


    # build eq.8 in the form A x = b
    nat = Jtsparse.T.dot(Jadjusted1d)   # lnJ(x)
    a = np.dot(Jtsparse.T, Jtsparse)
    b = dx.T.dot(dx)
    c = dy.T.dot(dy)
    mat = a + Lambda * (b + c)     # lnH(x)

    # x = spsolve(a,b) <--> a*x = b
    # lnH(x) * beta(x) = lnJ(x) --> beta(x) = spsolve(lnH(x), lnJ(x))
    # use the sparse-matrix spsolve instead of linalg.solve()
    beta1d = spsolve(mat, nat)  # eq.8
    beta = np.reshape(beta1d, (J.shape[0], J.shape[1]), order="c")

    # simulate darkening shading by repeated strokes: the pattern Jtexture is applied beta times
    T = Jtexture ** beta    # eq.9
    T = (T - T.min()) / (T.max() - T.min())

    img = Image.fromarray(T * 255)
    # img.show()

    return T
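In the original pencil-drawing pipeline the stroke layer and the tone layer are combined multiplicatively to form the final result. A minimal end-to-end sketch, assuming get_s and get_t are importable from their module (the name pencil_draw is hypothetical) along with the helpers and module-level constants they use; the value passed as type is also a guess, since the accepted values are defined by natural_histogram_matching, which is not shown here:

import numpy as np
from PIL import Image

from pencil_draw import get_s, get_t  # hypothetical module name for the snippets above

gray = np.array(Image.open("input.jpg").convert("L"))

S = get_s(gray, gammaS=1)                  # stroke structure in [0, 1]
T = get_t(gray, type="colour", gammaI=1)   # tone map in [0, 1]; the "colour" value is a guess

R = S * T                                  # pixel-wise combination of the sketch and tone layers
Image.fromarray((R * 255).astype(np.uint8)).save("pencil.png")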