Example #1
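# Shared imports assumed by the examples below (TensorFlow 1.x plus tflearn);
# the exact import paths are a best guess from the layer names used in the code.
import tensorflow as tf
import tflearn
from tensorflow.contrib.layers import variance_scaling_initializer
from tflearn.layers.conv import conv_1d, max_pool_1d
from tflearn.layers.normalization import batch_normalization


# `central_cut` is a project-specific helper that is not shown in these
# examples. The sketch below is inferred from the inline cropping in the last
# example and may differ from the real implementation: it keeps only the
# central block_size // pool_factor time steps.
def central_cut(net, block_size, pool_factor):
    keep = block_size // pool_factor
    offset = tf.div(tf.shape(net)[1] - keep, 2)
    cut = tf.slice(net, [0, offset, 0], [-1, keep, -1], name="Cutting")
    # Restore the static batch/channel dims lost through the dynamic offset.
    cut.set_shape([net.get_shape()[0], keep, net.get_shape()[2]])
    return cut
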
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, dtype,
             **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

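    # Plain (non-residual) stack: three blocks of BN + ReLU + conv_1d, each
    # block ending with a 2x max-pool, so the time axis shrinks 8x in total.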
    for block in range(1, 4):
        with tf.variable_scope("block%d" % block):
            for layer in range(kwargs['num_layers']):
                with tf.variable_scope('layer_%d' % layer):
                    net = batch_normalization(net)
                    net = tf.nn.relu(net)
                    net = conv_1d(
                        net,
                        64,
                        3,
                        scope="conv_1d_%d" % layer,
                        weights_init=variance_scaling_initializer(dtype=dtype))

            net = max_pool_1d(net, 2)
        net = tf.nn.relu(net)

    net = central_cut(net, block_size, 8)
    net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")
    net = conv_1d(net, 9, 1, scope='logits')
    return {
        'logits': net,
        'init_state': tf.constant(0),
        'final_state': tf.constant(0),
    }
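
# A minimal usage sketch for the variant above, assuming the imports and the
# central_cut sketch from the top of this page; the hyperparameter values
# (batch size, block size, reach, class count) are illustrative placeholders.
batch_size, block_size, max_reach = 16, 200, 25
X = tf.placeholder(tf.float32,
                   [batch_size, max_reach + block_size + max_reach, 3])
X_len = tf.placeholder(tf.int32, [batch_size])
ops = model_fn(X, X_len, max_reach=max_reach, block_size=block_size,
               out_classes=9, batch_size=batch_size, dtype=tf.float32,
               num_layers=3)
# ops['logits'] is time-major: (block_size // 8, batch_size, 9) == (25, 16, 9)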
Example #2
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, dtype,
             **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

    print("model in", net.get_shape())
    for block in range(1, 4):
        with tf.variable_scope("block%d" % block):
            if block > 1:
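                # Halve the channel dimension by max-pooling adjacent channel
                # pairs, so the residual sum matches this block's smaller conv width.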
                net = tf.expand_dims(net, 3)
                net = tf.layers.max_pooling2d(net, [1, 2], [1, 2])
                net = tf.squeeze(net, axis=3)

            for layer in range(kwargs['num_layers']):
                with tf.variable_scope('layer_%d' % layer):
                    res = net
                    for sublayer in range(kwargs['num_sub_layers']):
                        res = batch_normalization(res,
                                                  scope='bn_%d' % sublayer)
                        res = tf.nn.relu(res)
                        res = conv_1d(
                            res,
                            32 * 2**(4 - block),
                            3,
                            scope="conv_1d_%d" % sublayer,
                            weights_init=variance_scaling_initializer(
                                dtype=dtype))
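                    # Gated residual connection: a learnable scalar k (initialized
                    # to 1) is passed through relu and scales the residual branch.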
                    k = tf.get_variable(
                        "k",
                        initializer=tf.constant_initializer(1.0),
                        shape=[])
                    net = tf.nn.relu(k) * res + net
            net = max_pool_1d(net, 2)
        net = tf.nn.relu(net)

    net = central_cut(net, block_size, 8)
    print("after slice", net.get_shape())
    net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")
    print("after transpose", net.get_shape())
    net = conv_1d(net, 9, 1, scope='logits')
    print("model out", net.get_shape())
    return {
        'logits': net,
        'init_state': tf.constant(0),
        'final_state': tf.constant(0),
    }
Example #3
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, dtype,
             **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

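    # Two residual conv blocks (4x total time downsampling) followed by a GRU
    # over the time-major features.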
    for block in range(1, 3):
        with tf.variable_scope("block%d" % block):
            for layer in range(kwargs['num_layers']):
                with tf.variable_scope('layer_%d' % layer):
                    res = net
                    for sublayer in range(kwargs['num_sub_layers']):
                        res = batch_normalization(
                            res, scope='bn_%d' % sublayer)
                        res = tf.nn.relu(res)
                        res = conv_1d(
                            res,
                            64,
                            3,
                            scope="conv_1d_%d" % sublayer,
                            weights_init=variance_scaling_initializer(
                                dtype=dtype)
                        )
                    k = tf.get_variable(
                        "k", initializer=tf.constant_initializer(1.0), shape=[])
                    net = tf.nn.relu(k) * res + net
            net = max_pool_1d(net, 2)
        net = tf.nn.relu(net)

    net = central_cut(net, block_size, 4)
    net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")
    # with tf.name_scope("RNN"):
    #     from tensorflow.contrib.cudnn_rnn import CudnnGRU, RNNParamsSaveable
    #     rnn_layer = CudnnGRU(
    #         num_layers=1,
    #         num_units=64,
    #         input_size=64,
    #     )
    #
    #     print(rnn_layer.params_size())
    #     import sys
    #     sys.exit(0)
    #     rnn_params = tf.get_variable("rnn_params", shape=[rnn_layer.params_size()], validate_shape=False)
    #     params_saveable = RNNParamsSaveable(
    #         rnn_layer.params_to_canonical, rnn_layer.canonical_to_params, [rnn_params])
    #     tf.add_to_collection(tf.GraphKeys.SAVEABLE_OBJECTS, params_saveable)
    #

    with tf.name_scope("RNN"):
        cell = tf.contrib.rnn.GRUCell(64)
        init_state = cell.zero_state(batch_size, dtype=tf.float32)
        # X_len is divided by 4 (rounded up) to account for the two 2x max-pools above.
        outputs, final_state = tf.nn.dynamic_rnn(
            cell,
            net,
            initial_state=init_state,
            sequence_length=tf.div(X_len + 3, 4),
            time_major=True,
            parallel_iterations=128)

    net = conv_1d(outputs, 9, 1, scope='logits')
    return {
        'logits': net,
        'init_state': init_state,
        'final_state': final_state
    }
Example #4
def model_fn(net, X_len, max_reach, block_size, out_classes, batch_size, dtype,
             **kwargs):
    """
        Args:
        net -> Input tensor shaped (batch_size, max_reach + block_size + max_reach, 3)
        Returns:
        logits -> Unscaled logits tensor in time_major form, (block_size, batch_size, out_classes)
    """

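    # Deep residual stack: three blocks of 20 gated residual layers each (8x
    # time downsampling), then a dense projection to out_classes.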
    with tf.name_scope("model"):
        print("model in", net.get_shape())
        for block in range(1, 4):
            with tf.variable_scope("block%d" % block):
                for layer in range(1, 20 + 1):
                    with tf.variable_scope('layer_%d' % layer):
                        res = net
                        for sublayer in [1, 2]:
                            res = batch_normalization(res,
                                                      scope='bn_%d' % sublayer)
                            res = tf.nn.relu(res)
                            res = conv_1d(
                                res,
                                64,
                                3,
                                scope="conv_1d_%d" % sublayer,
                                weights_init=variance_scaling_initializer(
                                    dtype=dtype))
                        k = tf.get_variable(
                            "k",
                            initializer=tf.constant_initializer(1.0),
                            shape=[])
                        net = tf.nn.relu(k) * res + net
                net = max_pool_1d(net, 2)

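        # Crop symmetrically to the central block_size // 8 time steps; in
        # training mode, assert that the discarded margin is even.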
        cut_size = tf.shape(net)[1] - tf.div(block_size, 8)
        with tf.control_dependencies([
                tf.cond(
                    tflearn.get_training_mode(), lambda: tf.assert_equal(
                        tf.mod(cut_size, 2), 0, name="cut_size_assert"),
                    lambda: tf.no_op())
        ]):
            cut_size = tf.div(cut_size, 2)

        net = tf.slice(net, [0, cut_size, 0],
                       [-1, tf.div(block_size, 8), -1],
                       name="Cutting")
        print("after slice", net.get_shape())

        net = tf.transpose(net, [1, 0, 2], name="Shift_to_time_major")

        state_size = 64
        outputs = net
        print("outputs", outputs.get_shape())

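        # Per-time-step dense projection to out_classes: flatten (time, batch)
        # into one axis, apply W and b, then restore the time-major shape.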
        with tf.variable_scope("Output"):
            outputs = tf.reshape(outputs,
                                 [block_size // 8 * batch_size, state_size],
                                 name="flatten")
            W = tf.get_variable("W", shape=[state_size, out_classes])
            b = tf.get_variable("b", shape=[out_classes])
            outputs = tf.matmul(outputs, W) + b
            logits = tf.reshape(outputs,
                                [block_size // 8, batch_size, out_classes],
                                name="logits")
    print("model out", logits.get_shape())
    return {
        'logits': logits,
        'init_state': tf.constant(0),
        'final_state': tf.constant(0),
    }