Code example #1
def model(x, y, is_training):
    # %% We'll convert our MNIST vector data to a 4-D tensor:
    # N x W x H x C
    x_tensor = tf.reshape(x, [-1, 28, 28, 1])

    # %% We'll use a new method called batch normalization.
    # This process attempts to "reduce internal covariate shift"
    # which is a fancy way of saying that it will normalize updates for each
    # batch using a smoothed version of the batch mean and variance
    # The original paper proposes using this before any nonlinearities
    h_1 = lrelu(batch_norm(conv2d(x_tensor, 32, name='conv1'),
                           is_training,
                           scope='bn1'),
                name='lrelu1')
    h_2 = lrelu(batch_norm(conv2d(h_1, 64, name='conv2'),
                           is_training,
                           scope='bn2'),
                name='lrelu2')
    h_3 = lrelu(batch_norm(conv2d(h_2, 64, name='conv3'),
                           is_training,
                           scope='bn3'),
                name='lrelu3')
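    # Assuming conv2d (from libs.connections) keeps its default stride of 2 with
    # 'SAME' padding, the spatial size shrinks 28 -> 14 -> 7 -> 4 across the three
    # convolutions above, which is why the flattened size below is 64 * 4 * 4.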
    h_3_flat = tf.reshape(h_3, [-1, 64 * 4 * 4])
    h_4 = linear(h_3_flat, 10)
    y_pred = tf.nn.softmax(h_4)

    # %% Define loss/eval/training functions
    cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
    train_step = tf.train.AdamOptimizer().minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

    return [train_step, accuracy]
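The batch_norm helper used above comes from libs.batch_norm and its body is not shown in these examples. A minimal sketch of what such a helper typically does, assuming the batch_norm(x, phase_train, scope) interface seen here and the usual moving-average implementation (an illustration, not the library's actual code):

import tensorflow as tf

def batch_norm(x, phase_train, scope='bn', decay=0.9, eps=1e-5):
    # Sketch: batch normalization for NHWC tensors, with moving-average
    # statistics used at test time.
    with tf.variable_scope(scope):
        n_out = x.get_shape().as_list()[-1]
        beta = tf.get_variable('beta', [n_out], initializer=tf.zeros_initializer())
        gamma = tf.get_variable('gamma', [n_out], initializer=tf.ones_initializer())

        # Statistics of the current batch, taken over the N, H and W axes.
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        # Smoothed ("moving average") version of those statistics.
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        # Use (and update) batch statistics while training, the smoothed
        # statistics while testing.
        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        return tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)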
Code example #2
def residual_network(x, n_outputs, activation=tf.nn.relu):
    LayerBlock = namedtuple('LayerBlock', ['num_repeats', 'num_filters', 'bottleneck_size'])
    blocks = [
    LayerBlock(3, 128, 32),
    LayerBlock(3, 256, 64),
    LayerBlock(3, 512, 128),
    LayerBlock(3, 1024, 256)]

    # If the input data is 2-D, reshape it to a 4-D tensor to match MNIST
    input_shape = x.get_shape().as_list()
    if len(input_shape) == 2:
        ndim = int(sqrt(input_shape[1]))
        if ndim * ndim != input_shape[1]:
            raise ValueError('input_shape should be square')
        x = tf.reshape(x, [-1, ndim, ndim, 1])

    net = conv2d(x, 64, k_h=7, k_w=7, name='conv1', activation=activation)
    net = tf.nn.max_pool(net, [1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
    net = conv2d(net, blocks[0].num_filters, k_h=1, k_w=1, stride_h=1, stride_w=1, padding='VALID', name='conv2')

    for block_i, block in enumerate(blocks):
        for repeat_i in range(block.num_repeats):
            name = 'block_%d/repeat_%d' % (block_i, repeat_i)
            conv = conv2d(net, block.bottleneck_size, k_h=1, k_w=1, padding='VALID', stride_h=1, stride_w=1, activation=activation, name=name + '/conv_in')
            conv = conv2d(conv, block.bottleneck_size, k_h=3, k_w=3, padding='SAME', stride_h=1, stride_w=1, activation=activation, name=name + '/conv_bottleneck')
            conv = conv2d(conv, block.num_filters, k_h=1, k_w=1, padding='VALID', stride_h=1, stride_w=1, activation=activation, name=name + '/conv_out')
            net = conv + net
        try:
            next_block = blocks[block_i + 1]
            net = conv2d(net, next_block.num_filters, k_h=1, k_w=1, padding='SAME', stride_h=1, stride_w=1, bias=False, name='block_%d/conv_upscale' % block_i)
        except IndexError:
            pass

    net = tf.nn.avg_pool(net, ksize=[1, net.get_shape().as_list()[1], net.get_shape().as_list()[2], 1], strides=[1, 1, 1, 1], padding='VALID')
    net = tf.reshape(net, [-1, net.get_shape().as_list()[1] * net.get_shape().as_list()[2] * net.get_shape().as_list()[3]])
    net = linear(net, n_outputs, activation=tf.nn.softmax)

    return net
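The conv2d, linear and other helpers come from libs.connections and are not shown here. Assuming conv2d defaults to a stride of 2 with 'SAME' padding (and stride 1 wherever it is passed explicitly), the shapes for a 28x28 MNIST input would flow roughly as hand-traced below; this is a sketch, not output of the code:

# x:           [N, 28, 28,    1]
# conv1:       [N, 14, 14,   64]   7x7 kernel, default stride 2
# max_pool:    [N,  7,  7,   64]   3x3 window, stride 2, 'SAME'
# conv2:       [N,  7,  7,  128]   1x1, stride 1
# block 0:     [N,  7,  7,  128]   3 x (1x1 -> 3x3 -> 1x1) bottlenecks, each added back to its input
# upscale 0:   [N,  7,  7,  256]   1x1 conv widens the channels for the next block
# blocks 1-3:  spatial size stays 7x7; channels widen to 512 and then 1024
# avg_pool:    [N,  1,  1, 1024]  ->  reshape  ->  [N, 1024]  ->  linear + softmax  ->  [N, n_outputs]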
Code example #3
def residual_network(x, n_outputs,
                     activation=tf.nn.relu):
    """Builds a residual network.
    Parameters
    ----------
    x : Placeholder
        Input to the network
    n_outputs : int
        Number of outputs of final softmax
    activation : Attribute, optional
        Nonlinearity to apply after each convolution
    Returns
    -------
    net : Tensor
        Output tensor of the network
    Raises
    ------
    ValueError
        If a 2D Tensor is input, the Tensor must be square or else
        the network can't be converted to a 4D Tensor.
    """
    # %%
    LayerBlock = namedtuple(
        'LayerBlock', ['num_repeats', 'num_filters', 'bottleneck_size'])
    blocks = [LayerBlock(3, 128, 32),
              LayerBlock(3, 256, 64),
              LayerBlock(3, 512, 128),
              LayerBlock(3, 1024, 256)]

    # %%
    input_shape = x.get_shape().as_list()
    if len(input_shape) == 2:
        ndim = int(sqrt(input_shape[1]))
        if ndim * ndim != input_shape[1]:
            raise ValueError('input_shape should be square')
        x = tf.reshape(x, [-1, ndim, ndim, 1])

    # %%
    # First convolution expands to 64 channels and downsamples
    net = conv2d(x, 64, k_h=7, k_w=7,
                 name='conv1',
                 activation=activation)

    # %%
    # Max pool and downsampling
    net = tf.nn.max_pool(
        net, [1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

    # %%
    # Setup first chain of resnets
    net = conv2d(net, blocks[0].num_filters, k_h=1, k_w=1,
                 stride_h=1, stride_w=1, padding='VALID', name='conv2')

    # %%
    # Loop through all res blocks
    for block_i, block in enumerate(blocks):
        for repeat_i in range(block.num_repeats):

            name = 'block_%d/repeat_%d' % (block_i, repeat_i)
            conv = conv2d(net, block.bottleneck_size, k_h=1, k_w=1,
                          padding='VALID', stride_h=1, stride_w=1,
                          activation=activation,
                          name=name + '/conv_in')

            conv = conv2d(conv, block.bottleneck_size, k_h=3, k_w=3,
                          padding='SAME', stride_h=1, stride_w=1,
                          activation=activation,
                          name=name + '/conv_bottleneck')

            conv = conv2d(conv, block.num_filters, k_h=1, k_w=1,
                          padding='VALID', stride_h=1, stride_w=1,
                          activation=activation,
                          name=name + '/conv_out')

            net = conv + net
        try:
            # upscale to the next block size
            next_block = blocks[block_i + 1]
            net = conv2d(net, next_block.num_filters, k_h=1, k_w=1,
                         padding='SAME', stride_h=1, stride_w=1, bias=False,
                         name='block_%d/conv_upscale' % block_i)
        except IndexError:
            pass

    # %%
    net = tf.nn.avg_pool(net,
                         ksize=[1, net.get_shape().as_list()[1],
                                net.get_shape().as_list()[2], 1],
                         strides=[1, 1, 1, 1], padding='VALID')
    net = tf.reshape(
        net,
        [-1, net.get_shape().as_list()[1] *
         net.get_shape().as_list()[2] *
         net.get_shape().as_list()[3]])

    net = linear(net, n_outputs, activation=tf.nn.softmax)

    # %%
    return net
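None of the snippets above actually call residual_network. A minimal usage sketch under the same MNIST setup as the batch-normalization examples (the placeholder names, learning rate and loss are illustrative assumptions, not part of the original code):

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Build the graph; the 784-dim input is reshaped to 28x28x1 inside the function.
y_pred = residual_network(x, 10)

# Same loss / accuracy pattern as in the batch-norm examples.
cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
train_step = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))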
Code example #4
def residual_network(x, n_outputs,
                     activation=tf.nn.relu):
    """Builds a residual network.

    Parameters
    ----------
    x : Placeholder
        Input to the network
    n_outputs : int
        Number of outputs of final softmax
    activation : Attribute, optional
        Nonlinearity to apply after each convolution

    Returns
    -------
    net : Tensor
        Output tensor of the network

    Raises
    ------
    ValueError
        If a 2D Tensor is input, the Tensor must be square or else
        the network can't be converted to a 4D Tensor.
    """
    # 
    LayerBlock = namedtuple(
        'LayerBlock', ['num_repeats', 'num_filters', 'bottleneck_size'])
    blocks = [LayerBlock(3, 128, 32),
              LayerBlock(3, 256, 64),
              LayerBlock(3, 512, 128),
              LayerBlock(3, 1024, 256)]

    # 
    input_shape = x.get_shape().as_list()
    if len(input_shape) == 2:
        ndim = int(sqrt(input_shape[1]))
        if ndim * ndim != input_shape[1]:
            raise ValueError('input_shape should be square')
        x = tf.reshape(x, [-1, ndim, ndim, 1])

    # First convolution expands to 64 channels and downsamples
    net = conv2d(x, 64, k_h=7, k_w=7, name='conv1', activation=activation)

    # Max pool and downsampling
    net = tf.nn.max_pool(net, [1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')

    # Setup first chain of resnets
    net = conv2d(net, blocks[0].num_filters, k_h=1, k_w=1, stride_h=1, stride_w=1, padding='VALID', name='conv2')

    # Loop through all res blocks
    for block_i, block in enumerate(blocks):
        for repeat_i in range(block.num_repeats):

            name = 'block_%d/repeat_%d' % (block_i, repeat_i)
            conv = conv2d(net, block.bottleneck_size, k_h=1, k_w=1,
                          padding='VALID', stride_h=1, stride_w=1,
                          activation=activation,
                          name=name + '/conv_in')

            conv = conv2d(conv, block.bottleneck_size, k_h=3, k_w=3,
                          padding='SAME', stride_h=1, stride_w=1,
                          activation=activation,
                          name=name + '/conv_bottleneck')

            conv = conv2d(conv, block.num_filters, k_h=1, k_w=1,
                          padding='VALID', stride_h=1, stride_w=1,
                          activation=activation,
                          name=name + '/conv_out')

            net = conv + net
        try:
            # upscale to the next block size
            next_block = blocks[block_i + 1]
            net = conv2d(net, next_block.num_filters, k_h=1, k_w=1,
                         padding='SAME', stride_h=1, stride_w=1, bias=False,
                         name='block_%d/conv_upscale' % block_i)
        except IndexError:
            pass

    # 
    net = tf.nn.avg_pool(net,
                         ksize=[1, net.get_shape().as_list()[1],
                                net.get_shape().as_list()[2], 1],
                         strides=[1, 1, 1, 1], padding='VALID')
    net = tf.reshape(
        net,
        [-1, net.get_shape().as_list()[1] *
         net.get_shape().as_list()[2] *
         net.get_shape().as_list()[3]])

    net = linear(net, n_outputs, activation=tf.nn.softmax)

    # 
    return net
Code example #5
# %% We add a new type of placeholder to denote when we are training.
# This will be used to change the way we compute the network during
# training/testing.
is_training = tf.placeholder(tf.bool, name='is_training')

# %% We'll convert our MNIST vector data to a 4-D tensor:
# N x W x H x C
x_tensor = tf.reshape(x, [-1, 28, 28, 1])
#ema.apply([batch_mean, batch_var])
# %% We'll use a new method called batch normalization.
# This process attempts to "reduce internal covariate shift"
# which is a fancy way of saying that it will normalize updates for each
# batch using a smoothed version of the batch mean and variance
# The original paper proposes using this before any nonlinearities
h_1 = lrelu(batch_norm(conv2d(x_tensor, 32, name='conv1'),
                       is_training,
                       scope='bn1'),
            name='lrelu1')
h_2 = lrelu(batch_norm(conv2d(h_1, 64, name='conv2'), is_training,
                       scope='bn2'),
            name='lrelu2')
h_3 = lrelu(batch_norm(conv2d(h_2, 64, name='conv3'), is_training,
                       scope='bn3'),
            name='lrelu3')
h_3_flat = tf.reshape(h_3, [-1, 64 * 4 * 4])
h_4 = linear(h_3_flat, 10)
y_pred = tf.nn.softmax(h_4)

# %% Define loss/eval/training functions
cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
Code example #6
# %% We add a new type of placeholder to denote when we are training.
# This will be used to change the way we compute the network during
# training/testing.
is_training = tf.placeholder(tf.bool, name='is_training')

# %% We'll convert our MNIST vector data to a 4-D tensor:
# N x W x H x C
x_tensor = tf.reshape(x, [-1, 28, 28, 1])

# %% We'll use a new method called batch normalization.
# This process attempts to "reduce internal covariate shift"
# which is a fancy way of saying that it will normalize updates for each
# batch using a smoothed version of the batch mean and variance
# The original paper proposes using this before any nonlinearities
h_1 = lrelu(batch_norm(conv2d(x_tensor, 32, name='conv1'),
                       is_training, scope='bn1'), name='lrelu1')
h_2 = lrelu(batch_norm(conv2d(h_1, 64, name='conv2'),
                       is_training, scope='bn2'), name='lrelu2')
h_3 = lrelu(batch_norm(conv2d(h_2, 64, name='conv3'),
                       is_training, scope='bn3'), name='lrelu3')
h_3_flat = tf.reshape(h_3, [-1, 64 * 4 * 4])
h_4 = linear(h_3_flat, 10)
y_pred = tf.nn.softmax(h_4)

# %% Define loss/eval/training functions
cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
Code example #7
File: 예제5-3.py  Project: TaehoLi/DL-Tensorflow
epochs = 100
batch_size = 100
X = tf.placeholder(tf.float32, [None, 784])
X_img = tf.reshape(X, [-1, 28, 28, 1])
Y = tf.placeholder(tf.float32, [None, 10])
# ResNet block structure (bottleneck design)
LayerBlock = namedtuple('LayerBlock',
                        ['num_repeats', 'num_filters', 'bottleneck_size'])
blocks = [
    LayerBlock(3, 128, 32),
    LayerBlock(3, 256, 64),
    LayerBlock(3, 512, 128),
    LayerBlock(3, 1024, 256)
]
# Create a 64-channel convolution output and downsample
net = conv2d(X_img, 64, k_h=7, k_w=7, name='conv1', activation=tf.nn.relu)
net = tf.nn.max_pool(net, [1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
# Create the input to the ResNet blocks
net = conv2d(net,
             blocks[0].num_filters,
             k_h=1,
             k_w=1,
             stride_h=1,
             stride_w=1,
             padding='VALID',
             name='conv2')
# Repeat the ResNet blocks
for block_i, block in enumerate(blocks):
    for repeat_i in range(block.num_repeats):
        name = 'block_%d/repeat_%d' % (block_i, repeat_i)
        conv1 = conv2d(net,
Code example #8
def residual_network(x, n_outputs, activation=tf.nn.relu):
    LayerBlock = namedtuple('LayerBlock',
                            ['num_repeats', 'num_filters', 'bottleneck_size'])
    blocks = [
        LayerBlock(3, 128, 32),
        LayerBlock(3, 256, 64),
        LayerBlock(3, 512, 128),
        LayerBlock(3, 1024, 256)
    ]

    # If the input data is 2-D, reshape it to a 4-D tensor to match MNIST
    input_shape = x.get_shape().as_list()
    if len(input_shape) == 2:
        ndim = int(sqrt(input_shape[1]))
        if ndim * ndim != input_shape[1]:
            raise ValueError('input_shape should be square')
        x = tf.reshape(x, [-1, ndim, ndim, 1])

    net = conv2d(x, 64, k_h=7, k_w=7, name='conv1', activation=activation)
    net = tf.nn.max_pool(net, [1, 3, 3, 1],
                         strides=[1, 2, 2, 1],
                         padding='SAME')
    net = conv2d(net,
                 blocks[0].num_filters,
                 k_h=1,
                 k_w=1,
                 stride_h=1,
                 stride_w=1,
                 padding='VALID',
                 name='conv2')

    for block_i, block in enumerate(blocks):
        for repeat_i in range(block.num_repeats):
            name = 'block_%d/repeat_%d' % (block_i, repeat_i)
            conv = conv2d(net,
                          block.bottleneck_size,
                          k_h=1,
                          k_w=1,
                          padding='VALID',
                          stride_h=1,
                          stride_w=1,
                          activation=activation,
                          name=name + '/conv_in')
            conv = conv2d(conv,
                          block.bottleneck_size,
                          k_h=3,
                          k_w=3,
                          padding='SAME',
                          stride_h=1,
                          stride_w=1,
                          activation=activation,
                          name=name + '/conv_bottleneck')
            conv = conv2d(conv,
                          block.num_filters,
                          k_h=1,
                          k_w=1,
                          padding='VALID',
                          stride_h=1,
                          stride_w=1,
                          activation=activation,
                          name=name + '/conv_out')
            net = conv + net
        try:
            next_block = blocks[block_i + 1]
            net = conv2d(net,
                         next_block.num_filters,
                         k_h=1,
                         k_w=1,
                         padding='SAME',
                         stride_h=1,
                         stride_w=1,
                         bias=False,
                         name='block_%d/conv_upscale' % block_i)
        except IndexError:
            pass

    net = tf.nn.avg_pool(net,
                         ksize=[
                             1,
                             net.get_shape().as_list()[1],
                             net.get_shape().as_list()[2], 1
                         ],
                         strides=[1, 1, 1, 1],
                         padding='VALID')
    net = tf.reshape(net, [
        -1,
        net.get_shape().as_list()[1] * net.get_shape().as_list()[2] *
        net.get_shape().as_list()[3]
    ])
    net = linear(net, n_outputs, activation=tf.nn.softmax)

    return net
Code example #9
Parag K. Mital, Jan 2016.
"""
# %%
import tensorflow as tf
from libs.batch_norm import batch_norm
from libs.activations import lrelu
from libs.connections import conv2d, linear
import cmtf.data.data_mnist as data_mnist

mnist = data_mnist.read_data_sets(one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
is_training = tf.placeholder(tf.bool, name='is_training')
x_tensor = tf.reshape(x, [-1, 28, 28, 1])
h_1 = lrelu(batch_norm(conv2d(x_tensor, 32, name='conv1'), is_training, scope='bn1'), name='lrelu1')
h_2 = lrelu(batch_norm(conv2d(h_1, 64, name='conv2'), is_training, scope='bn2'), name='lrelu2')
h_3 = lrelu(batch_norm(conv2d(h_2, 64, name='conv3'), is_training, scope='bn3'), name='lrelu3')
h_3_flat = tf.reshape(h_3, [-1, 64 * 4 * 4])
h_4 = linear(h_3_flat, 10)
y_pred = tf.nn.softmax(h_4)

# %% Define loss/eval/training functions
cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

# %% We now create a new session to actually perform the initialization of
# the variables:
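# A sketch of the session setup and training loop that would typically follow
# (the epoch count, batch size and the mnist.train / mnist.validation
# attributes are assumptions based on the standard read_data_sets interface):
sess = tf.Session()
sess.run(tf.global_variables_initializer())

batch_size = 100
n_epochs = 10
for epoch_i in range(n_epochs):
    # Train on mini-batches using batch statistics (is_training=True).
    for batch_i in range(mnist.train.num_examples // batch_size):
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        sess.run(train_step, feed_dict={x: batch_xs, y: batch_ys,
                                        is_training: True})
    # Evaluate with the smoothed moving-average statistics (is_training=False).
    print(sess.run(accuracy, feed_dict={x: mnist.validation.images,
                                        y: mnist.validation.labels,
                                        is_training: False}))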
Code example #10
# %% We'll convert our feature vectors (MFCC here, not MNIST) to a 4-D tensor:
# N x W x H x C

x_tensor = tf.reshape(x, [-1, 1, 26, 1])  # FOR MFCC-26 dim
#x_tensor = tf.reshape(x, [-1, 1, 40, 1])  # FOR CONVAE

# %% We'll use a new method called batch normalization.
# This process attempts to "reduce internal covariate shift"
# which is a fancy way of saying that it will normalize updates for each
# batch using a smoothed version of the batch mean and variance
# The original paper proposes using this before any nonlinearities

h_1 = lrelu(batch_norm(conv2d(x_tensor,
                              32,
                              name='conv1',
                              stride_h=1,
                              k_h=1,
                              k_w=3,
                              pool_size=[1, 1, 2, 1],
                              pool_stride=[1, 1, 1, 1]),
                       phase_train=is_training,
                       scope='bn1'),
            name='lrelu1')

h_2 = lrelu(batch_norm(conv2d(h_1,
                              64,
                              name='conv2',
                              stride_h=1,
                              k_h=1,
                              k_w=3,
                              pool_size=[1, 1, 2, 1],
                              pool_stride=[1, 1, 1, 1]),
Code example #11
from libs.connections import conv2d, linear
from libs.datasets import MNIST


# %% Setup input to the network and true output label.  These are
# simply placeholders which we'll fill in later.
mnist = MNIST()
x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])
x_tensor = tf.reshape(x, [-1, 28, 28, 1])

# %% Define the network:
bn1 = batch_norm(-1, name='bn1')
bn2 = batch_norm(-1, name='bn2')
bn3 = batch_norm(-1, name='bn3')
h_1 = lrelu(bn1(conv2d(x_tensor, 32, name='conv1')), name='lrelu1')
h_2 = lrelu(bn2(conv2d(h_1, 64, name='conv2')), name='lrelu2')
h_3 = lrelu(bn3(conv2d(h_2, 64, name='conv3')), name='lrelu3')
h_3_flat = tf.reshape(h_3, [-1, 64 * 4 * 4])
h_4 = linear(h_3_flat, 10)
y_pred = tf.nn.softmax(h_4)

# %% Define loss/eval/training functions
cross_entropy = -tf.reduce_sum(y * tf.log(y_pred))
train_step = tf.train.AdamOptimizer().minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

# %% We now create a new session to actually perform the initialization of
# the variables: