Example #1
import tensorflow as tf

# Assumes external helpers: time_to_batch, batch_to_time, conv1d.
def dilated_conv1d(inputs,
                   filters,
                   filter_width=2,
                   dilation_rate=1,
                   pad='VALID',
                   activation=None,
                   name='dilated_conv_1d',
                   reuse=False):
    with tf.name_scope(name):
        if dilation_rate > 1:
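            # Fold the dilation into the batch dimension so a stride-1
            # convolution acts as a dilated one, then fold it back after.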
            transformed = time_to_batch(inputs, dilation_rate)
            conv = conv1d(x=transformed,
                          num_filters=filters,
                          filter_size=filter_width,
                          stride=1,
                          pad=pad,
                          name=name,
                          reuse=reuse)
            restored = batch_to_time(conv, dilation_rate)
        else:
            restored = conv1d(x=inputs,
                              num_filters=filters,
                              filter_size=filter_width,
                              stride=1,
                              pad=pad,
                              name=name,
                              reuse=reuse)
        # Remove excess elements at the end.
        out_width = tf.shape(inputs)[1] - (filter_width - 1) * dilation_rate
        result = tf.slice(restored, [0, 0, 0], [-1, out_width, -1])

        if activation is not None:
            result = activation(result)

        return result
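
A minimal usage sketch, assuming TensorFlow 1.x and that the time_to_batch, batch_to_time and conv1d helpers referenced above are in scope (shapes here are illustrative, not from the source):

import tensorflow as tf

# [batch, time, channels] input; length 128 and 16 channels are arbitrary.
signal = tf.placeholder(tf.float32, [None, 128, 16])
features = dilated_conv1d(signal, filters=32, filter_width=2,
                          dilation_rate=4, activation=tf.nn.relu)
# With 'VALID' padding the output is trimmed by (filter_width - 1) * dilation_rate
# time steps: 128 - 1 * 4 = 124, so features has shape [batch, 124, 32].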
Example #2
import tensorflow as tf

# Assumes an external conv1d helper.
def conv_1d_network(x,
                    ob_space,
                    ac_space,
                    conv_1d_num_layers=4,
                    conv_1d_num_filters=32,
                    conv_1d_filter_size=3,
                    conv_1d_stride=2,
                    pad="SAME",
                    dtype=tf.float32,
                    collections=None,
                    reuse=False,
                    **kwargs):
    """
    Stage 1 network: maps preprocessed 1D input to estimated features.
    Encapsulates convolutions, possibly skip connections, etc. Can be shared.

    Returns:
        Tensor holding state features.
    """
    for i in range(conv_1d_num_layers):
        x = tf.nn.elu(
            conv1d(
                x,
                conv_1d_num_filters,
                "conv1d_{}".format(i + 1),
                conv_1d_filter_size,
                conv_1d_stride,
                pad,
                dtype,
                collections,
                reuse
            )
        )
    return x
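
A minimal usage sketch, assuming TensorFlow 1.x and a conv1d helper with tf.layers.conv1d-like stride/padding semantics (ob_space and ac_space are accepted but unused in the body, so placeholders suffice):

import tensorflow as tf

obs = tf.placeholder(tf.float32, [None, 64, 8])  # [batch, time, channels]
features = conv_1d_network(obs, ob_space=None, ac_space=None,
                           conv_1d_num_layers=4, conv_1d_stride=2, pad="SAME")
# With stride 2 and SAME padding each layer halves the time axis:
# 64 -> 32 -> 16 -> 8 -> 4, giving features of shape [batch, 4, 32].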
Example #3
import math

import tensorflow as tf

# Assumes external helpers: conv1d, norm_layer, attention_layer.
def conv_1d_casual_attention_encoder(
        x,
        keep_prob,
        ob_space=None,
        ac_space=None,
        conv_1d_num_filters=32,
        conv_1d_filter_size=2,
        conv_1d_activation=tf.nn.elu,
        conv_1d_attention_ref=tf.contrib.seq2seq.LuongAttention,
        # conv_1d_attention_ref=tf.contrib.seq2seq.BahdanauAttention,
        name='casual_encoder',
        reuse=False,
        collections=None,
        **kwargs):
    """
    Tree-shaped convolution stack encoder with self-attention.

    Stage 1 causal convolution network: maps 1D input to estimated features.

    Returns:
        Tensor holding state features.
    """

    with tf.variable_scope(name_or_scope=name, reuse=reuse):
        shape = x.get_shape().as_list()
        if len(shape) > 3:  # remove pseudo 2d dimension
            x = x[:, :, 0, :]
        # Number of tree levels needed to reduce the sequence to length one:
        num_layers = int(math.log(shape[1], conv_1d_filter_size))

        layers = []
        attention_layers = []
        y = x

        for i in range(num_layers):

            _, length, channels = y.get_shape().as_list()

            # t2b (time-to-batch): left-pad so the length divides evenly
            # by the filter size.
            tail = length % conv_1d_filter_size
            if tail != 0:
                pad = conv_1d_filter_size - tail
                paddings = [[0, 0], [pad, 0], [0, 0]]
                y = tf.pad(y, paddings)
                length += pad

            num_time_batches = int(length / conv_1d_filter_size)

            y = tf.reshape(y, [-1, conv_1d_filter_size, channels],
                           name='layer_{}_t2b'.format(i))

            y = conv1d(x=y,
                       num_filters=conv_1d_num_filters,
                       filter_size=conv_1d_filter_size,
                       stride=1,
                       pad='VALID',
                       name='conv1d_layer_{}'.format(i))
            y = tf.nn.dropout(y, keep_prob=keep_prob)
            # b2t (batch-to-time): restore the time dimension.
            y = tf.reshape(y, [-1, num_time_batches, conv_1d_num_filters],
                           name='layer_{}_output'.format(i))
            y = norm_layer(y)
            if conv_1d_activation is not None:
                y = conv_1d_activation(y)

            layers.append(y)

            # Insert attention for all but top layer:
            if num_time_batches > 1:
                attention = attention_layer(
                    y,
                    attention_ref=conv_1d_attention_ref,
                    name='attention_layer_{}'.format(i))
                attention_layers.append(attention)

        # Take the last time step of every layer as its convolved summary:
        convolved = tf.stack([h[:, -1, :] for h in layers],
                             axis=1,
                             name='convolved_stack')
        attended = tf.concat(attention_layers, axis=-2, name='attention_stack')

        encoded = tf.concat([convolved, attended],
                            axis=-2,
                            name='encoded_state')

    return encoded
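
A minimal usage sketch, assuming TensorFlow 1.x with tf.contrib available, plus the conv1d, norm_layer and attention_layer helpers referenced above (shapes are illustrative):

import tensorflow as tf

# Pseudo-2d input of shape [batch, time, 1, channels]; a power-of-two
# time dimension gives a full binary tree with the default filter size 2.
obs = tf.placeholder(tf.float32, [None, 64, 1, 4])
keep_prob = tf.placeholder_with_default(1.0, shape=[])
encoded = conv_1d_casual_attention_encoder(obs, keep_prob=keep_prob)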
Example #4
import math

import tensorflow as tf

# Assumes external helpers: conv1d, norm_layer.
def conv_1d_casual_encoder(
        x,
        ob_space,
        ac_space,
        conv_1d_num_filters=32,
        conv_1d_filter_size=2,
        conv_1d_activation=tf.nn.elu,
        conv_1d_overlap=1,
        name='casual_encoder',
        keep_prob=None,
        conv_1d_gated=False,
        reuse=False,
        collections=None,
        **kwargs
    ):
    """
    Tree-shaped convolution stack encoder: a computationally cheaper alternative to a dilated stack.

    Stage 1 causal convolution network: maps 1D input to estimated features.

    Returns:
        Tensor holding state features.
    """

    with tf.variable_scope(name_or_scope=name, reuse=reuse):
        shape = x.get_shape().as_list()
        if len(shape) > 3:  # remove pseudo 2d dimension
            x = x[:, :, 0, :]
        # Number of tree levels needed to reduce the sequence to length one:
        num_layers = int(math.log(shape[1], conv_1d_filter_size))

        layers = []
        slice_depth = []
        y = x

        for i in range(num_layers):

            _, length, channels = y.get_shape().as_list()

            # t2b (time-to-batch): left-pad so the length divides evenly
            # by the filter size.
            tail = length % conv_1d_filter_size
            if tail != 0:
                pad = conv_1d_filter_size - tail
                paddings = [[0, 0], [pad, 0], [0, 0]]
                y = tf.pad(y, paddings)
                length += pad

            num_time_batches = int(length / conv_1d_filter_size)

            y = tf.reshape(y, [-1, conv_1d_filter_size, channels], name='layer_{}_t2b'.format(i))

            y = conv1d(
                x=y,
                num_filters=conv_1d_num_filters,
                filter_size=conv_1d_filter_size,
                stride=1,
                pad='VALID',
                name='conv1d_layer_{}'.format(i)
            )
            # b2t (batch-to-time): restore the time dimension.
            y = tf.reshape(y, [-1, num_time_batches, conv_1d_num_filters], name='layer_{}_output'.format(i))

            y = norm_layer(y)
            if conv_1d_activation is not None:
                y = conv_1d_activation(y)

            if keep_prob is not None:
                y = tf.nn.dropout(y, keep_prob=keep_prob, name="layer_{}_with_dropout".format(i))

            layers.append(y)

            # Trailing time steps to keep from this layer: the overlap
            # shrinks by the filter size at every level, but never below 1.
            depth = max(conv_1d_overlap // conv_1d_filter_size ** i, 1)
            slice_depth.append(depth)

        # Keep only the trailing slice_depth steps of each layer:
        sliced_layers = [
            tf.slice(
                h,
                begin=[0, h.get_shape().as_list()[1] - d, 0],
                size=[-1, d, -1]
            ) for h, d in zip(layers, slice_depth)
        ]
        output_stack = sliced_layers
        # Optionally replace each slice with a gated (GLU-style) output:
        if conv_1d_gated:
            split_size = conv_1d_num_filters // 2
            output_stack = []
            for l in sliced_layers:
                x1 = l[..., :split_size]
                x2 = l[..., split_size:]

                y = tf.multiply(
                    x1,
                    tf.nn.sigmoid(x2),
                    name='gated_conv_output'
                )
                output_stack.append(y)

        encoded = tf.concat(output_stack, axis=1, name='encoded_state')

    return encoded
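
A minimal usage sketch, assuming TensorFlow 1.x and the conv1d / norm_layer helpers referenced above (shapes and the overlap value are illustrative):

import tensorflow as tf

obs = tf.placeholder(tf.float32, [None, 32, 1, 4])  # pseudo-2d input
encoded = conv_1d_casual_encoder(
    obs,
    ob_space=None,
    ac_space=None,
    conv_1d_overlap=4,  # keep the last 4 steps of the bottom layer, fewer above
)
# Layer i contributes its trailing max(conv_1d_overlap // filter_size ** i, 1)
# steps, so the concatenated encoded state mixes several time scales.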