Example #1
import tensorflow as tf


def softmax(logits, mask=None, scope=None):
    # `exp_mask`, `flatten` and `reconstruct` are helpers defined elsewhere in this module.
    with tf.name_scope(scope or "Softmax"):
        if mask is not None:
            # Apply the mask (typically by pushing masked-out positions to a large
            # negative value) so they receive ~0 probability after the softmax.
            logits = exp_mask(logits, mask)
        # Collapse the leading dimensions to 2-D, softmax over the last axis,
        # then restore the original shape.
        flat_logits = flatten(logits, 1)
        flat_out = tf.nn.softmax(flat_logits)
        out = reconstruct(flat_out, logits, 1)

        return out
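
A minimal usage sketch (illustrative values; it assumes exp_mask, flatten and reconstruct from the same module and a 0/1 mask with the same shape as the logits):

logits = tf.constant([[1.0, 2.0, 3.0],
                      [0.5, 0.1, -1.0]])
mask = tf.constant([[1, 1, 0],
                    [1, 1, 1]])
probs = softmax(logits, mask=mask)  # masked positions end up with ~0 probability
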
Example #2
def linear(args,
           output_size,
           bias,
           bias_start=0.0,
           scope=None,
           squeeze=False,
           wd=0.0,
           input_keep_prob=1.0,
           is_train=None):
    # if args is None or (nest.is_sequence(args) and not args):
    #     raise ValueError("`args` must be specified")
    # if not nest.is_sequence(args):
    #     args = [args]

    # Flatten each argument to 2-D ([batch * ..., features]) before the matmul.
    flat_args = [flatten(arg, 1) for arg in args]
    # NOTE: dropout is applied unconditionally (is_train is not consulted);
    # with the default input_keep_prob of 1.0 it is effectively a no-op.
    flat_args = [tf.nn.dropout(arg, input_keep_prob) for arg in flat_args]

    total_arg_size = 0
    shapes = [a.get_shape() for a in flat_args]
    for shape in shapes:
        if shape.ndims != 2:
            raise ValueError("linear is expecting 2D arguments: %s" % shapes)
        if shape[1].value is None:
            raise ValueError(
                "linear expects shape[1] to be provided for shape %s, "
                "but saw %s" % (shape, shape[1]))
        else:
            total_arg_size += shape[1].value
    # All arguments are assumed to share a dtype; take it from the first one.
    dtype = [a.dtype for a in flat_args][0]

    # Fall back to a default scope name when none is given ("linear" is an assumed default).
    with tf.variable_scope(scope or "linear") as outer_scope:
        # _WEIGHTS_VARIABLE_NAME / _BIAS_VARIABLE_NAME are module-level constants.
        weights = tf.get_variable(_WEIGHTS_VARIABLE_NAME,
                                  [total_arg_size, output_size],
                                  dtype=dtype)
        if len(flat_args) == 1:
            res = tf.matmul(flat_args[0], weights)
        else:
            res = tf.matmul(tf.concat(flat_args, 1), weights)
        if not bias:
            flat_out = res
        else:
            with tf.variable_scope(outer_scope) as inner_scope:
                inner_scope.set_partitioner(None)
                biases = tf.get_variable(_BIAS_VARIABLE_NAME, [output_size],
                                         dtype=dtype,
                                         initializer=tf.constant_initializer(
                                             bias_start, dtype=dtype))
            flat_out = tf.nn.bias_add(res, biases)

    # Restore the leading dimensions that were flattened away.
    out = reconstruct(flat_out, args[0], 1)
    if squeeze:
        # Drop the trailing dimension (useful when output_size == 1).
        out = tf.squeeze(out, [len(args[0].get_shape().as_list()) - 1])
    if wd:
        # add_wd presumably registers an L2 weight-decay term for this scope's variables.
        add_wd(wd)

    return out
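
A minimal usage sketch (h and u are illustrative names for two tensors of identical shape, like c and d in the self-test at the end of Example #3):

sim = linear([h, u, h * u], 1, bias=False, squeeze=True, scope="similarity")
# squeeze=True drops the trailing size-1 dimension, leaving one score per position.
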
Example #3
import numpy as np
import tensorflow as tf

# Truncated at the source: the signature and loop are reconstructed (assumed).
def multi_conv1d(in_, filter_sizes, heights, padding, keep_prob=1.0, scope=None):
    with tf.variable_scope(scope or "multi_conv1d"):
        outs = []
        for filter_size, height in zip(filter_sizes, heights):
            out = conv1d(in_,
                         filter_size,
                         height,
                         padding,
                         keep_prob=keep_prob,
                         scope="conv1d_{}".format(height))
            outs.append(out)
        # Concatenate one feature map per filter height along the channel axis.
        concat_out = tf.concat(outs, axis=2)
        return concat_out
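
A hedged usage sketch for the fragment above; the function name and keyword arguments follow the reconstruction (assumptions), and the exact shapes depend on the conv1d helper, which is not shown:

feats = multi_conv1d(in_, filter_sizes=[50, 50], heights=[3, 5],
                     padding="VALID", keep_prob=0.8, scope="char_cnn")
# One conv1d feature map per filter height, concatenated along axis 2.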


if __name__ == '__main__':
    # Smoke test: a and b are tiled to a common [2, 2, 3, 4] shape so that
    # flatten / reconstruct / linear can be exercised end to end.
    a = tf.Variable(np.random.random(size=(2, 2, 4)))
    b = tf.Variable(np.random.random(size=(2, 3, 4)))
    c = tf.tile(tf.expand_dims(a, 2), [1, 1, 3, 1])
    test = flatten(c, 1)
    out = reconstruct(test, c, 1)
    d = tf.tile(tf.expand_dims(b, 1), [1, 2, 1, 1])
    e = linear(
        [c, d, c * d],
        1,
        bias=False,
        scope="test",
    )
    # f = softsel(d, e)
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        print(sess.run(test))
        print(sess.run(tf.shape(out)))
        print(sess.run(tf.shape(a)))