Example #1
def build_layer(line,
                layer_in,
                seq_length=None,
                keep_in_prob=None,
                keep_out_prob=None):
    # Dispatch on the layer tag (e.g. '<Relu>') at the start of the line.
    layer_type, info = line.split(' ', 1)
    if layer_type == '<AffineTransform>':
        layer_out = layer.affine_transform(info, layer_in)
    elif layer_type == '<LinearTransform>':
        layer_out = layer.linear_transform(info, layer_in)
    elif layer_type == '<BatchNormalization>':
        layer_out = layer.batch_normalization(info, layer_in)
    elif layer_type == '<Sigmoid>':
        layer_out = tf.sigmoid(layer_in)
    elif layer_type == '<Relu>':
        layer_out = tf.nn.relu(layer_in)
    elif layer_type == '<Tanh>':
        layer_out = tf.tanh(layer_in)
    elif layer_type == '<Softmax>':
        layer_out = tf.nn.softmax(layer_in)
    elif layer_type == '<Dropout>':
        layer_out = tf.nn.dropout(layer_in, float(info))
    elif layer_type == '<LSTM>':
        layer_out = layer.lstm(info, layer_in, seq_length, keep_in_prob,
                               keep_out_prob)
    elif layer_type == '<BLSTM>':
        layer_out = layer.blstm(info, layer_in, seq_length, keep_in_prob,
                                keep_out_prob)
    else:
        raise RuntimeError("layer_type %s not supported" % layer_type)

    return layer_out
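
A minimal driver for build_layer, assuming one layer description per line of a text file; the file handling and placeholder names below are illustrative, not part of the original code.

import tensorflow as tf

def build_network(config_path, feats, seq_length,
                  keep_in_prob, keep_out_prob):
    # Hypothetical helper: feed each config line to build_layer and
    # chain the outputs to assemble the full network.
    out = feats
    with open(config_path) as f:
        for line in f:
            line = line.strip()
            if line:
                out = build_layer(line, out, seq_length,
                                  keep_in_prob, keep_out_prob)
    return out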
Example #2
def convolve(self, image, training, keep_prob):
    result = layer.batch_normalization(image, training)
    result = layer.conv_relu(result, 1, 18, width=5, padding="VALID")
    result = layer.max_pool(result)  # spatial size: 12
    result = layer.conv_relu(result, 18, 24, width=5, padding="VALID")
    result = layer.max_pool(result)  # spatial size: 4
    result = tf.nn.dropout(result, keep_prob)
    return layer.conv(result, 24, 10, width=4, padding="VALID")
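
The "spatial size" comments track the feature map after each max_pool. A quick sanity check of that arithmetic, assuming a 28x28 input (e.g. MNIST) and 2x2 pooling, neither of which is stated in the snippet itself:

def conv_out(size, width, stride=1):
    # Output size of a "VALID" convolution.
    return (size - width) // stride + 1

size = conv_out(28, 5)    # 24 after the first 5x5 VALID conv
size //= 2                # 12 after max_pool (matches the comment)
size = conv_out(size, 5)  # 8 after the second 5x5 VALID conv
size //= 2                # 4 after max_pool (matches the comment)
size = conv_out(size, 4)  # 1: the final conv collapses to 1x1 logits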
Example #3
def convolve(self, image, training, keep_prob):
    result = layer.batch_normalization(image, training)
    result = layer.conv_relu(result,
                             1,
                             18,
                             width=5,
                             stride=2,
                             padding="VALID")
    return layer.conv(result, 18, 10, width=12, padding="VALID")
Example #4
def convolve(self, image, training, keep_prob):
    result = layer.batch_normalization(image, training)
    result = layer.conv_relu(result, 1, 18, width=5)
    result = layer.max_pool(result)  # spatial size: 14
    result = tf.nn.relu(drop_conv(keep_prob, result, 18, 24, width=5))
    result = layer.max_pool(result)  # spatial size: 7
    result = tf.nn.relu(
        drop_conv(keep_prob, result, 24, 32, width=5, padding="VALID"))
    return layer.conv(result, 32, 10, width=3, padding="VALID")
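
drop_conv itself is not shown in these examples. Judging by its call sites (keep_prob first, then the usual conv arguments), it plausibly applies dropout to its input before convolving; a sketch under that assumption:

import tensorflow as tf

def drop_conv(keep_prob, layer_in, in_channels, out_channels,
              width, padding="SAME"):
    # Assumed behavior: dropout on the input, then a plain convolution
    # via the same layer.conv helper used elsewhere in these examples.
    dropped = tf.nn.dropout(layer_in, keep_prob)
    return layer.conv(dropped, in_channels, out_channels,
                      width=width, padding=padding)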
Example #5
def build_layer(line,
                layer_in,
                seq_length=None,
                mask_holder=None,
                keep_in_prob=None,
                keep_out_prob=None,
                keep_prob=None,
                reuse=False):
    layer_type, info = line.split(' ', 1)
    if layer_type == '<AffineTransform>':
        layer_out = layer.affine_transform(info, layer_in)
    elif layer_type == '<LinearTransform>':
        layer_out = layer.linear_transform(info, layer_in)
    elif layer_type == '<BatchNormalization>':
        layer_out = layer.batch_normalization(info, layer_in)
    elif layer_type == '<AffineBatchNormalization>':
        layer_out = layer.affine_batch_normalization(info, layer_in)
    elif layer_type == '<TDNNAffineTransform>':
        layer_out = layer.tdnn_affine_transform(info, layer_in)
    elif layer_type == '<Sigmoid>':
        layer_out = tf.sigmoid(layer_in)
    elif layer_type == '<Relu>':
        layer_out = tf.nn.relu(layer_in)
    elif layer_type == '<Relu6>':
        layer_out = tf.nn.relu6(layer_in)
    elif layer_type == '<Tanh>':
        layer_out = tf.tanh(layer_in)
    elif layer_type == '<Softmax>':
        layer_out = tf.nn.softmax(layer_in)
    elif layer_type == '<Dropout>':
        layer_out = tf.nn.dropout(layer_in, keep_prob)
    elif layer_type == '<LSTM>':
        layer_out = layer.lstm(info,
                               layer_in,
                               seq_length,
                               keep_in_prob,
                               keep_out_prob,
                               reuse=reuse)
    elif layer_type == '<BLSTM>':
        layer_out = layer.blstm(info,
                                layer_in,
                                seq_length,
                                keep_in_prob,
                                keep_out_prob,
                                reuse=reuse)
    elif layer_type == '<Pooling>':
        layer_out = layer.pooling(info, layer_in, mask_holder, reuse=reuse)
    else:
        raise RuntimeError("layer_type %s not supported" % layer_type)

    return layer_out
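
Compared with Example #1, this version adds more layer types, a keep_prob placeholder for dropout, and a reuse flag, and it rejects unknown tags explicitly. A hypothetical use of reuse to build a weight-sharing evaluation copy of the graph (the tensors and probabilities below are placeholders):

train_out = build_layer(line, feats, seq_length, mask_holder,
                        keep_in_prob=0.8, keep_out_prob=0.8,
                        keep_prob=0.5, reuse=False)
# A second call with reuse=True shares the variables created above,
# assuming the layer.* helpers build them inside tf.variable_scope.
eval_out = build_layer(line, feats, seq_length, mask_holder,
                       keep_in_prob=1.0, keep_out_prob=1.0,
                       keep_prob=1.0, reuse=True)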
Example #6
def convolve(self, image, training, keep_prob):
    result = image
    result = layer.batch_normalization(result, training)
    result = layer.conv(result, 1, 16, width=5, stride=2, padding="VALID")
    result = tf.nn.tanh(result)
    result = layer.conv(result, 16, 16, width=3, stride=2, padding="VALID")
    result = tf.nn.tanh(result)
    result = layer.conv(result, 16, 32, width=3, padding="VALID")
    result = tf.nn.tanh(result)
    result = layer.conv(result, 32, 32, width=3, padding="VALID")
    result = tf.nn.tanh(result)
    result = tf.nn.dropout(result, keep_prob)
    result = layer.conv_relu(result, 32, 10, width=1, padding="VALID")
    return result
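
This variant replaces pooling with strided convolutions and ReLU with tanh. The spatial sizes it implies, again assuming a 28x28 input:

def conv_out(size, width, stride=1):
    # Output size of a "VALID" convolution (same helper as above).
    return (size - width) // stride + 1

size = conv_out(28, 5, stride=2)    # 12
size = conv_out(size, 3, stride=2)  # 5
size = conv_out(size, 3)            # 3
size = conv_out(size, 3)            # 1 before the final 1x1 conv head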
Example #7
def convolve(self, image, training, keep_prob):
    result = layer.batch_normalization(image, training)
    result = layer.conv_relu(result, 1, 18, width=5)
    result = layer.resnet_block(result, 18, 3, training, momentum=0.99)
    result = layer.max_pool(result)  # spatial size: 14
    result = layer.resnet_block(result, 18, 3, training, momentum=0.99)
    result = layer.conv_relu(result, 18, 24, width=5)
    result = layer.resnet_block(result, 24, 3, training, momentum=0.99)
    result = layer.max_pool(result)  # spatial size: 7
    result = layer.resnet_block(result, 24, 3, training, momentum=0.99)
    result = layer.conv_relu(result, 24, 32, width=5, padding="VALID")
    result = layer.resnet_block(result, 32, 3, training, momentum=0.99)
    result = tf.nn.dropout(result, keep_prob)
    return layer.conv(result, 32, 10, width=3, padding="VALID")
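
The internals of layer.resnet_block are not shown. Its arguments (input, channel count, kernel width, training flag, batch-norm momentum) and the fact that channel counts never change across a call suggest an identity-skip residual block; a minimal sketch under those assumptions:

import tensorflow as tf

def resnet_block(layer_in, channels, width, training, momentum=0.99):
    # Assumed structure: two BN -> ReLU -> conv stages plus an identity
    # shortcut; a "SAME" conv keeps the spatial size so the final
    # addition is well-formed.
    out = tf.layers.batch_normalization(layer_in, training=training,
                                        momentum=momentum)
    out = tf.nn.relu(out)
    out = layer.conv(out, channels, channels, width=width)
    out = tf.layers.batch_normalization(out, training=training,
                                        momentum=momentum)
    out = tf.nn.relu(out)
    out = layer.conv(out, channels, channels, width=width)
    return layer_in + out  # residual connection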
Example #8
def convolve(self, image, training, keep_prob):
    result = layer.batch_normalization(image, training)
    result = layer.conv_relu(result, 1, 18, width=5)
    result = layer.max_pool(result)  # spatial size: 14
    result = layer.resnet_block(result, 18, 3, training)
    result = layer.conv_relu(result, 18, 24, width=3)
    result = layer.max_pool(result)  # spatial size: 7
    result = layer.resnet_block(result, 24, 3, training)
    result = layer.resnet_block(result, 24, 3, training)
    return layer.drop_conv(keep_prob,
                           result,
                           24,
                           10,
                           width=7,
                           padding="VALID")
Example #9
def convolve(self, image, training, keep_prob):
    result = layer.batch_normalization(image, training)
    result = layer.conv_relu(result, 1, 18, width=5, padding="VALID")
    result = layer.max_pool(result)  # spatial size: 12
    result = layer.resnet_block(result, 18, 3, training)
    result = layer.resnet_block(result, 18, 3, training)
    result = layer.max_pool(result)  # spatial size: 6
    result = layer.conv_relu(result, 18, 24, width=1)
    result = layer.resnet_narrow(result, 24, 3, training)
    result = layer.resnet_narrow(result, 24, 3, training)
    result = layer.max_pool(result)  # spatial size: 3
    result = layer.conv_relu(result, 24, 32, width=1)
    result = layer.resnet_narrow(result, 32, 3, training)
    result = layer.resnet_narrow(result, 32, 3, training)
    return layer.drop_conv(keep_prob,
                           result,
                           32,
                           10,
                           width=3,
                           padding="VALID")