Example #1
def lambda_gradient(ys, xs, order=1, name=''):
    """Returns the gradients of y in `ys` w.r.t. x in `xs` using Lambda layers.
    
    `ys` and `xs` are each a Tensor or a list of tensors.

    # Arguments
        ys: A tensor or list of tensors to be differentiated.
        xs: A tensor or list of tensors to be used for differentiation.
        order: An int, the order of differentiation (defaults to 1).
        name: A str name for the Lambda layer.

    # Returns
        A tuple, `(layers, grads)`.
        layers: A Lambda layer or list of Lambda layers where the gradient operator is applied.
        grads: A gradient tensor or list of gradient tensors. 
    """
    name_prefix = 'Grad_' if order == 1 else 'Grad{:d}_'.format(order)

    grads, layers = [], []
    for y in to_list(ys):
        lay = Lambda(lambda y: gradients(y, xs, order))
        lay.name = name_prefix + name + '/' + lay.name
        layers += to_list(lay)
        grads += to_list(lay(y))

    return (unpack_singleton(layers), unpack_singleton(grads))
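A minimal usage sketch, not part of the original snippet: it assumes a graph-mode TensorFlow 1.x / standalone-Keras setup (tf.gradients is not available eagerly) and shows how a gradient Lambda built this way can be wired into a model; the toy network and the names x, y, dy_dx are illustrative only.

import tensorflow as tf
from keras.layers import Input, Dense, Lambda
from keras.models import Model

x = Input(shape=(1,))                      # independent variable
h = Dense(10, activation='tanh')(x)
y = Dense(1)(h)                            # network output y(x)

# Same pattern as lambda_gradient: wrap the gradient op in a Lambda layer.
grad_layer = Lambda(lambda t: tf.gradients(t, x)[0], name='Grad_y')
dy_dx = grad_layer(y)

model = Model(inputs=x, outputs=[y, dy_dx])
model.summary()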
Example #2
def diag(f):
    """Diag operation converts a vector output (None, N) to a matrix form of (None,N,N) functional.

    # Arguments
        f: Functional object.

    # Returns
        A Functional.
    """
    validate_functional(f)

    lmbd = []
    outputs = []
    for o in f.outputs:
        assert len(o.shape) == 2, \
            'Expected output dimension to be (None, N)'
        dim = o.shape[-1]
        l = Lambda(lambda x: tf.linalg.diag(x))
        l.name = "diag_" + l.name.split("_")[-1]
        lmbd += [l]
        outputs += [l(o)]

    Functional = f.get_class()
    res = Functional(inputs=f.inputs.copy(), outputs=outputs, layers=lmbd)
    return res
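A short, self-contained sketch (not from the original source) of the underlying operation: tf.linalg.diag lifts a batch of vectors of shape (batch, N) to a batch of diagonal matrices of shape (batch, N, N), which is what the Lambda layer above applies to each output of the Functional.

import tensorflow as tf

v = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])            # shape (2, 3), i.e. (None, N) at runtime
m = tf.linalg.diag(v)                      # shape (2, 3, 3)
# m[0] == [[1., 0., 0.],
#          [0., 2., 0.],
#          [0., 0., 3.]]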
Example #3
def dot(f, other):
    """Dot product of two `Functional` objects.

    # Arguments
        f: Functional object.
        other: A Functional object whose outputs match those of `f` in number and shape.

    # Returns
        A Functional.
    """
    validate_functional(f)
    validate_functional(other)
    assert len(f.outputs) == len(other.outputs)

    outputs = []
    layers = []
    for fl, fr in zip(f.outputs, other.outputs):
        assert fl.shape.as_list() == fr.shape.as_list(),\
            'Expected equal dimensions for output of functionals. '
        l = Lambda(lambda x: K.reshape(
            tf.math.reduce_sum(x * fr, list(range(1, len(fl.shape)))), [-1, 1])
                   )
        l.name = "dot/" + l.name.split("_")[-1]
        layers += [l]
        outputs += [l(fl)]

    inputs = to_list(f.inputs) + to_list(other.inputs)
    Functional = f.get_class()
    res = Functional(inputs=unique_tensors(inputs),
                     outputs=outputs,
                     layers=layers)
    return res
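A self-contained sketch (not from the original source) of the reduction inside the Lambda layer: for two outputs of shape (batch, N) it sums the elementwise product over every non-batch axis and reshapes the result to (batch, 1), i.e. a per-sample dot product.

import tensorflow as tf

a = tf.constant([[1., 2., 3.],
                 [4., 5., 6.]])            # shape (2, 3)
b = tf.constant([[1., 0., 1.],
                 [0., 1., 0.]])            # shape (2, 3)

axes = list(range(1, len(a.shape)))        # every axis except the batch axis
d = tf.reshape(tf.math.reduce_sum(a * b, axes), [-1, 1])
# d == [[4.],   (1*1 + 2*0 + 3*1)
#       [5.]]   (4*0 + 5*1 + 6*0)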
Example #4
def get_decoder_outputs(output_modalities, decoders, embeddings):
    assert len(output_modalities) == len(decoders)

    outputs = list()
    for di, decode in enumerate(decoders):
        for emi, em in enumerate(embeddings):
            out_em = decode(em)
            name = 'em_' + str(emi) + '_dec_' + output_modalities[di]
            l = Lambda(lambda x: x + 0, name=name)(out_em)
            l.name = name
            outputs.append(l)
            print('making output:', em.type, out_em.type, name)

    return outputs
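A hedged, self-contained sketch (not from the original code) of the naming trick used above: the identity Lambda `x + 0` changes nothing numerically, but it wraps each (embedding, decoder) output in its own named layer so the output can later be addressed by name, e.g. when assigning per-output losses. The layer name and toy network below are illustrative only.

from keras.layers import Input, Dense, Lambda
from keras.models import Model

inp = Input(shape=(8,))
em = Dense(4)(inp)                              # stands in for one embedding
out = Lambda(lambda x: x + 0, name='em_0_dec_rgb')(em)

model = Model(inputs=inp, outputs=out)
# The output can now be targeted by its layer name, e.g.:
# model.compile(optimizer='adam', loss={'em_0_dec_rgb': 'mse'})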
Example #5
def avg_pool_model(facenet_model):
    """
    insert a global average pool layer to avg pool all faces in a given template
    :param facenet_model:
    :return:
    """
    # facenet_model.layers.pop()
    # facenet_model.layers.pop()
    # facenet_model.layers.pop()
    # facenet_model.summary()
    average = Lambda(lambda z: K.mean(z, axis=0))
    average.name = 'lambda_99999'
    x = average(facenet_model.layers[-1].output)
    # x = K.map_fn(average, facenet_model.layers[-1].output)
    aggregate_model = Model(inputs=facenet_model.input, outputs=x)
    aggregate_model.summary()

    return aggregate_model
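A brief, hedged sketch (not from the original code) of what the pooling Lambda computes: K.mean(z, axis=0) collapses the batch axis, so a template of k face embeddings is reduced to one averaged embedding. The shapes below are illustrative only.

import numpy as np
from keras import backend as K

template = K.constant(np.random.rand(5, 128))   # 5 faces, 128-d embedding each
pooled = K.mean(template, axis=0)               # one averaged embedding per template
print(K.int_shape(pooled))                      # (128,)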
Example #6
def _lambda_gradient(ys, xs, order=1, gtype='Grad', name=''):
    """Returns the gradients of y in `ys` w.r.t. x in `xs` using Lambda layers.
    
    `ys` and `xs` are each a Tensor or a list of tensors.

    # Arguments
        ys: A tensor or list of tensors to be differentiated.
        xs: A tensor or list of tensors to be used for differentiation.
        order: An int, the order of differentiation (defaults to 1).
        gtype: type of differentiation - can be:
            - 'Grad' for gradient operation, i.e. Gij = dy_j / dx_i
            - 'dGrad' for the diagonal of gradient tensor, i.e. Gi = dy_i / dx_i
            - 'Div' for divergence operation, i.e. G = sum(dy_i / dx_i)
        name: A str name for the Lambda layer. 

    # Returns
        A tuple, `(layers, grads)`.
        layers: A Lambda layer or list of Lambda layers where the gradient operator is applied.
        grads: A gradient tensor or list of gradient tensors. 
    """

    grads, layers = [], []
    for y in to_list(ys):
        if gtype == 'Grad':
            lay = Lambda(lambda y: _gradients(y, xs, order))
            name_prefix = 'Grad_' if order == 1 else 'Grad{:d}_'.format(order)
        elif gtype == 'dGrad':
            lay = Lambda(lambda y: _diag_gradients(y, xs, order))
            name_prefix = 'DiagGrad_' if order == 1 else 'DiagGrad{:d}_'.format(
                order)
        elif gtype == 'Div':
            lay = Lambda(lambda y: _div_gradients(y, xs, order))
            name_prefix = 'Div_' if order == 1 else 'Div{:d}_'.format(order)
        else:
            raise ValueError('Unrecognised gradient type: {} \n'.format(gtype) +
                             '     Please choose among (Grad, dGrad, Div). ')
        lay.name = name_prefix + name + '/' + lay.name
        layers += to_list(lay)
        grads += to_list(lay(y))

    return (unpack_singleton(layers), unpack_singleton(grads))
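A hedged sketch, not from the original library, of what the three gtype options compute for a first-order derivative; it uses plain tf.gradients in graph mode, and the helpers _gradients, _diag_gradients and _div_gradients are assumed to be variations of this pattern.

import tensorflow as tf
tf.compat.v1.disable_eager_execution()     # tf.gradients needs graph mode

x = tf.compat.v1.placeholder(tf.float32, shape=(None, 2))
# A toy vector field: y_0 = x_0 * x_1, y_1 = x_0 + x_1
y0 = x[:, 0] * x[:, 1]
y1 = x[:, 0] + x[:, 1]

# 'Grad'  : one Jacobian row per component, dy_j/dx
g0 = tf.gradients(y0, x)[0]                # per sample: [x_1, x_0]
g1 = tf.gradients(y1, x)[0]                # per sample: [1, 1]

# 'dGrad' : diagonal entries only, dy_i/dx_i
diag = tf.stack([g0[:, 0], g1[:, 1]], axis=-1)

# 'Div'   : sum of the diagonal, sum_i dy_i/dx_i
div = tf.reduce_sum(diag, axis=-1, keepdims=True)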