Example #1
def create_laplacian(arr_shape, M):
    ## These imports live at module level in the original project; they are
    ## repeated here so the snippet is self-contained.
    from numpy import arange, asarray, repeat
    import scipy.sparse
    import fast_energy_laplacian

    Lap = fast_energy_laplacian.gen_grid_laplacian(arr_shape[0], arr_shape[1])
    ## Now repeat Lap #pigments times.
    ## Because the layer values are the innermost dimension,
    ## every entry (i,j, val) in Lap should be repeated
    ## (i*#pigments + k, j*#pigments + k, val) for k in range(#pigments).
    Lap = Lap.tocoo()
    ## Store the shape. It's a good habit, because there may not be a nonzero
    ## element in the last row and column.
    shape = Lap.shape

    ## Fastest
    ks = arange(M)
    rows = (repeat(asarray(Lap.row).reshape(Lap.nnz, 1) * M, M, 1) +
            ks).ravel()
    cols = (repeat(asarray(Lap.col).reshape(Lap.nnz, 1) * M, M, 1) +
            ks).ravel()
    vals = (repeat(asarray(Lap.data).reshape(Lap.nnz, 1), M, 1)).ravel()

    Lap = scipy.sparse.coo_matrix((vals, (rows, cols)),
                                  shape=(shape[0] * M, shape[1] * M)).tocsr()
    return Lap
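
The interleaved replication above places each Laplacian entry (i, j, val) at (i*M + k, j*M + k) for every k in range(M), which is exactly the Kronecker product of the grid Laplacian with an M-by-M identity matrix. A minimal sketch of that equivalent construction (the name create_laplacian_kron is hypothetical, and it assumes the same fast_energy_laplacian module is importable):

import scipy.sparse
import fast_energy_laplacian

def create_laplacian_kron(arr_shape, M):
    ## Equivalent to create_laplacian() above: kron( L, I_M ) places each
    ## entry (i, j, val) of L at (i*M + k, j*M + k) for k in range(M).
    Lap = fast_energy_laplacian.gen_grid_laplacian(arr_shape[0], arr_shape[1])
    return scipy.sparse.kron(Lap, scipy.sparse.identity(M), format='csr')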
Example #2
def gen_energy_and_gradient(img,
                            layer_colors,
                            weights,
                            img_spatial_static_target=None,
                            scratches=None):
    '''
    Given a rows-by-cols-by-#channels 'img', where the channels are the 3 color channels,
    a (#layers+1)-by-#channels 'layer_colors' (the 0-th color is the background color),
    a dictionary 'weights' mapping a subset of
    { 'w_polynomial', 'w_opaque', 'w_spatial_static', 'w_spatial_dynamic' } to floating-point weights,
    an optional parameter 'img_spatial_static_target' giving the target values for the
    'w_spatial_static' term (it will be flattened if it is not already),
    and an optional parameter 'scratches', a dictionary used to store scratch space
    between calls to this function (pass it only *if* the arguments keep the same sizes),
    returns a tuple of functions:
        ( e, g )
    where e( Y ) computes the scalar energy of a flattened rows-by-cols-by-#layers array of (1-alpha) values,
    and g( Y ) computes the gradient of e.
    '''
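    ## Assumes a module-level 'from numpy import *' and the E_* / grad_* energy
    ## helpers defined elsewhere in the original module.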

    img = asfarray(img)
    layer_colors = asfarray(layer_colors)

    assert len(img.shape) == 3
    assert len(layer_colors.shape) == 2
    assert img.shape[2] == layer_colors.shape[1]

    # from pprint import pprint
    # pprint( weights )
    assert set(weights.keys()).issubset(
        set([
            'w_polynomial', 'w_opaque', 'w_spatial_static', 'w_spatial_dynamic'
        ]))

    C = layer_colors
    P = img.reshape(-1, img.shape[2])

    num_layers = C.shape[0] - 1
    Ylen = P.shape[0] * num_layers

    if 'w_spatial_static' in weights:
        assert img_spatial_static_target is not None
        Yspatial_static_target = img_spatial_static_target.ravel()

    if 'w_spatial_dynamic' in weights:
        # print 'Preparing a Laplacian matrix for E_spatial_dynamic...'
        import fast_energy_laplacian
        import scipy.sparse
        # print '    Generating L...'
        LTL = fast_energy_laplacian.gen_grid_laplacian(img.shape[0],
                                                       img.shape[1])
        # print '    Computing L.T*L...'
        # LTL = LTL.T * LTL
        # print '    Replicating L.T*L for all layers...'
        ## Now repeat LTL #layers times.
        ## Because the layer values are the innermost dimension,
        ## every entry (i,j, val) in LTL should be repeated
        ## (i*#layers + k, j*#layers + k, val) for k in range(#layers).
        LTL = LTL.tocoo()
        ## Store the shape. It's a good habit, because there may not be a nonzero
        ## element in the last row and column.
        shape = LTL.shape

        ## There is a "fastest" version below.
        '''
        rows = zeros( LTL.nnz * num_layers, dtype = int )
        cols = zeros( LTL.nnz * num_layers, dtype = int )
        vals = zeros( LTL.nnz * num_layers )
        count = 0
        ks = arange( num_layers )
        for r, c, val in zip( LTL.row, LTL.col, LTL.data ):
            ## Slow
            #for k in range( num_layers ):
            #    rows.append( r*num_layers + k )
            #    cols.append( c*num_layers + k )
            #    vals.append( val )
            
            ## Faster
            rows[ count : count + num_layers ] = r*num_layers + ks
            cols[ count : count + num_layers ] = c*num_layers + ks
            vals[ count : count + num_layers ] = val
            count += num_layers
            
        assert count == LTL.nnz * num_layers
        '''

        ## Fastest
        ks = arange(num_layers)
        rows = (repeat(
            asarray(LTL.row).reshape(LTL.nnz, 1) * num_layers, num_layers, 1) +
                ks).ravel()
        cols = (repeat(
            asarray(LTL.col).reshape(LTL.nnz, 1) * num_layers, num_layers, 1) +
                ks).ravel()
        vals = (repeat(asarray(LTL.data).reshape(LTL.nnz, 1), num_layers,
                       1)).ravel()

        LTL = scipy.sparse.coo_matrix(
            (vals, (rows, cols)),
            shape=(shape[0] * num_layers, shape[1] * num_layers)).tocsr()
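        ## Note: this interleaved replication equals the Kronecker product
        ## scipy.sparse.kron( LTL, scipy.sparse.identity( num_layers ) ).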
        # print '...Finished.'

    if scratches is None:
        scratches = {}

    def e(Y):
        e = 0.

        if 'w_polynomial' in weights:
            e += weights['w_polynomial'] * E_polynomial(Y, C, P, scratches)

        if 'w_opaque' in weights:
            e += weights['w_opaque'] * E_opaque(Y, scratches)

        if 'w_spatial_static' in weights:
            e += weights['w_spatial_static'] * E_spatial_static(
                Y, Yspatial_static_target, scratches)

        if 'w_spatial_dynamic' in weights:
            e += weights['w_spatial_dynamic'] * E_spatial_dynamic(
                Y, LTL, scratches)

        # print 'Y:', Y
        # print 'e:', e

        return e

    ## Preallocate this memory
    gradient_space = [zeros(Ylen), zeros(Ylen)]

    # total_gradient = zeros( Ylen )
    # gradient_term = zeros( Ylen )

    def g(Y):
        total_gradient = gradient_space[0]
        gradient_term = gradient_space[1]

        total_gradient[:] = 0.

        if 'w_polynomial' in weights:
            gradY_E_polynomial(Y, C, P, gradient_term, scratches)
            gradient_term *= weights['w_polynomial']
            total_gradient += gradient_term

        if 'w_opaque' in weights:
            grad_E_opaque(Y, gradient_term, scratches)
            gradient_term *= weights['w_opaque']
            total_gradient += gradient_term

        if 'w_spatial_static' in weights:
            grad_E_spatial_static(Y, Yspatial_static_target, gradient_term,
                                  scratches)
            gradient_term *= weights['w_spatial_static']
            total_gradient += gradient_term

        if 'w_spatial_dynamic' in weights:
            grad_E_spatial_dynamic(Y, LTL, gradient_term, scratches)
            gradient_term *= weights['w_spatial_dynamic']
            total_gradient += gradient_term

        # print 'Y:', Y
        # print 'total_gradient:', total_gradient

        return total_gradient

    return e, g
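
A usage sketch for gen_energy_and_gradient(), assuming the surrounding module (the E_* / grad_* energy helpers and fast_energy_laplacian) is available; the tiny image, layer colors, and weight values below are made up purely for illustration:

import numpy
import scipy.optimize

## Hypothetical inputs: a 4x4 RGB image and 3 layer colors plus a background color.
img = numpy.random.random((4, 4, 3))
layer_colors = numpy.asarray([[1., 1., 1.],   # background (0-th color)
                              [1., 0., 0.],
                              [0., 1., 0.],
                              [0., 0., 1.]])
weights = {'w_polynomial': 1., 'w_opaque': 0.1, 'w_spatial_dynamic': 0.01}

e, g = gen_energy_and_gradient(img, layer_colors, weights)

## Y holds flattened (1-alpha) values, one per pixel per layer, in [0, 1].
num_layers = layer_colors.shape[0] - 1
Y0 = 0.5 * numpy.ones(img.shape[0] * img.shape[1] * num_layers)

result = scipy.optimize.minimize(e, Y0, jac=g, method='L-BFGS-B',
                                 bounds=[(0., 1.)] * len(Y0))
Y = result.x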