Example #1
def _pretrained_resnet_biases_initializer(name,
                                          data,
                                          info=DummyDict(),
                                          full_info=DummyDict(),
                                          pre_adjust_batch_norm=False,
                                          bn_name=None,
                                          scale_name=None):
    shape = None
    #callback = lambda x: x
    if name in data and '1' in data[name]:
        init_type = 'file'
        sc_sigma = data[name]['0'].copy()
        sc_bias = data[name]['1'].copy()
        #if pre_adjust_batch_norm and scale_name is not None and bn_name is not None and bn_name in data:
        if pre_adjust_batch_norm and bn_name is not None and bn_name in data:
            bn_data = data[bn_name]
            bn_sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
            mu = bn_data['0'] / bn_data['2']
            #sc_bias = sc_bias - mu * sc_sigma / bn_sigma
            #callback = lambda x: x - mu * sc_sigma / bn_sigma
            #sc_bias = -mu / bn_sigma
            #sc_bias = -mu
            sc_bias = np.zeros_like(mu)
            init_type += ':batch-adjusted'  #(b-={})'.format(mu*sc_sigma/bn_sigma)
        init = tf.constant_initializer(sc_bias)
        #if full_info['config']['return_weights']:
        #full_info['weights'][name+':biases'] = sc_bias
        shape = sc_bias.shape
    else:
        init_type = 'init'
        init = tf.constant_initializer(0.0)
    info[name + '/biases'] = init_type
    return init, shape  #, callback
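Example #1 above and the examples that follow appear to come from a single TensorFlow 1.x model-definition module. As a working assumption, they rely on roughly the imports below; DummyDict, ops, conv, batch_norm, init_batch_norm_vars, vgg_conv, vgg_inner and the caffe weight-conversion helper are project-local names that are not reproduced in these snippets.

# Assumed imports for the snippets in this section (TensorFlow 1.x API;
# tf.contrib is used, so these would not run unmodified on TF 2.x).
import functools

import numpy as np
import tensorflow as tf

# DummyDict, ops, conv, batch_norm, vgg_conv, vgg_inner, init_batch_norm_vars
# and the caffe weight-conversion helper are assumed to be defined elsewhere
# in the same project.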
Example #2
def add_info(name, z, pre=None, info=DummyDict()):
    info['activations'][name] = z
    if info['config'].get('save_pre'):
        info['activations']['pre:' + name] = pre
    if info.get('scale_summary'):
        with tf.name_scope('activation'):
            tf.summary.scalar('activation/' + name, tf.sqrt(tf.reduce_mean(z**2)))
Example #3
def _pretrained_alex_inner_weights_initializer(name,
                                               data,
                                               info=DummyDict(),
                                               pre_adjust_batch_norm=False,
                                               prefix=''):
    shape = None
    if name in data and '0' in data[name]:
        tr = {'fc6': (4096, 256, 6, 6)}
        W = caffe.from_caffe(data[name]['0'],
                             name=name,
                             conv_fc_transitionals=tr,
                             color_layer='conv1')
        W = W.reshape((-1, W.shape[-1]))

        init_type = 'file'
        bn_name = 'batch_' + name
        if pre_adjust_batch_norm and bn_name in data:
            bn_data = data[bn_name]
            sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
            W /= sigma
            init_type += ':batch-adjusted'
        init = tf.constant_initializer(W.copy())
        shape = W.shape
    else:
        init_type = 'init'
        init = tf.contrib.layers.variance_scaling_initializer()
    info[prefix + ':' + name + '/weights'] = init_type
    return init, shape
Example #4
def _pretrained_vgg_inner_weights_initializer(name,
                                              data,
                                              info=DummyDict(),
                                              pre_adjust_batch_norm=False,
                                              prefix=''):
    shape = None
    if name in data and '0' in data[name]:
        W = data[name]['0']
        if name == 'fc6':
            W = W.reshape(W.shape[0], 512, 7,
                          7).transpose(0, 2, 3, 1).reshape(4096, -1).T
        else:
            W = W.T
        init_type = 'file'
        bn_name = 'batch_' + name
        if pre_adjust_batch_norm and bn_name in data:
            bn_data = data[bn_name]
            sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
            W /= sigma
            init_type += ':batch-adjusted'
        init = tf.constant_initializer(W.copy())
        shape = W.shape
    else:
        init_type = 'init'
        init = tf.contrib.layers.variance_scaling_initializer()
    info[prefix + ':' + name + '/weights'] = init_type
    return init, shape
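The fc6 branch above converts Caffe's fully connected weight layout, (outputs, channels*h*w) flattened channel-major, into the (h*w*channels, outputs) layout that matches TensorFlow's NHWC flattening. A minimal NumPy sketch of that conversion follows; the toy sizes are placeholders, whereas the snippet itself uses (4096, 512, 7, 7).

import numpy as np

# Toy stand-ins for the real fc6 sizes (4096 outputs, 512 channels, 7x7 maps).
outputs, channels, h, w = 16, 8, 7, 7
W_caffe = np.random.randn(outputs, channels * h * w).astype(np.float32)

# Same steps as the snippet: recover (out, c, h, w), move channels last to
# match NHWC flattening, then transpose to (inputs, outputs) for tf.matmul.
W_tf = (W_caffe.reshape(outputs, channels, h, w)
               .transpose(0, 2, 3, 1)
               .reshape(outputs, -1).T)

assert W_tf.shape == (h * w * channels, outputs)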
Example #5
def vgg_inner(x,
              channels,
              info=DummyDict(),
              stddev=None,
              activation=tf.nn.relu,
              name=None,
              parameters={},
              parameter_name=None,
              prefix=''):
    if parameter_name is None:
        parameter_name = name
    with tf.name_scope(name):
        f = channels
        features = np.prod(x.get_shape().as_list()[1:])
        xflat = tf.reshape(x, [-1, features])
        shape = [features, channels]

        W_init, W_shape = _pretrained_vgg_inner_weights_initializer(
            parameter_name, parameters, info=info.get('init'), prefix=prefix)
        b_init, b_shape = _pretrained_vgg_biases_initializer(
            parameter_name, parameters, info=info.get('init'), prefix=prefix)

        assert W_shape is None or tuple(W_shape) == tuple(
            shape), "Incorrect weights shape for %s" % name
        assert b_shape is None or tuple(b_shape) == (
            f, ), "Incorrect bias shape for %s" % name

        with tf.variable_scope(name):
            W = tf.get_variable('weights',
                                shape,
                                dtype=tf.float32,
                                initializer=W_init)
            b = tf.get_variable('biases', [f],
                                dtype=tf.float32,
                                initializer=b_init)

        z = tf.nn.bias_add(tf.matmul(xflat, W), b)

    if info['config'].get('save_pre'):
        info['activations']['pre:' + name] = z

    if activation is not None:
        z = activation(z)
    info['activations'][name] = z

    if info.get('scale_summary'):
        with tf.name_scope('activation'):
            tf.summary.scalar('activation/' + name,
                              tf.sqrt(tf.reduce_mean(z**2)))

    if 'weights' in info:
        info['weights'][name + ':weights'] = W
        info['weights'][name + ':biases'] = b
    return z
Example #6
def _pretrained_resnet_inner_weights_initializer(name,
                                                 data,
                                                 info=DummyDict(),
                                                 full_info=DummyDict(),
                                                 pre_adjust_batch_norm=False,
                                                 bn_name=None,
                                                 scale_name=None):
    shape = None
    mu = 0.0
    sg = 1.0
    if name in data and '0' in data[name]:
        W = data[name]['0']
        W = W.T
        init_type = 'file'
        #if pre_adjust_batch_norm and bn_name is not None and bn_name in data:
        #    bn_data = data[bn_name]
        #    sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
        #    W /= sigma
        #    init_type += ':batch-adjusted'
        if pre_adjust_batch_norm and bn_name is not None and bn_name in data:
            bn_data = data[bn_name]
            bn_sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
            sc_sigma = data[scale_name]['0']
            #W /= bn_sigma / sc_sigma
            #callback = lambda x: x * sc_sigma / bn_sigma
            #mu = -bn_data['0'] / bn_data['2'] * sc_sigma / bn_sigma
            mu = data[scale_name][
                '1'] - bn_data['0'] / bn_data['2'] * sc_sigma / bn_sigma
            #mu = data[scale_name]['1']
            sg = sc_sigma / bn_sigma
            init_type += ':batch-adjusted'  #(W*={})'.format(sc_sigma / bn_sigma)
        init = tf.constant_initializer(W.copy())
        #if full_info['config']['return_weights']:
        #full_info['weights'][name+':weights'] = W
        shape = W.shape
    else:
        init_type = 'init'
        init = tf.contrib.layers.variance_scaling_initializer()
    info[name + '/weights'] = init_type
    return init, shape, mu, sg
Example #7
def resnet_inner(x,
                 channels,
                 info=DummyDict(),
                 stddev=None,
                 activation=tf.nn.relu,
                 name=None,
                 parameters={},
                 parameter_name=None):
    if parameter_name is None:
        parameter_name = name
    with tf.name_scope(name):
        f = channels
        features = np.prod(x.get_shape().as_list()[1:])
        xflat = tf.reshape(x, [-1, features])
        shape = [features, channels]

        W_init, W_shape, mu, sg = _pretrained_resnet_inner_weights_initializer(
            parameter_name, parameters, info=info.get('init'))
        b_init, b_shape = _pretrained_resnet_biases_initializer(
            parameter_name, parameters, info=info.get('init'))

        assert W_shape is None or tuple(W_shape) == tuple(
            shape
        ), "Incorrect weights shape for {} (file: {}, spec: {})".format(
            name, W_shape, shape)
        assert b_shape is None or tuple(b_shape) == (
            f, ), "Incorrect bias shape for {} (file: {}, spec: {})".format(
                name, b_shape, (f, ))

        with tf.variable_scope(name):
            W = tf.get_variable('weights',
                                shape,
                                dtype=tf.float32,
                                initializer=W_init)
            #b = tf.get_variable('biases', [f], dtype=tf.float32,
            #initializer=b_init)

        z = tf.matmul(xflat, W)
        z = z * sg + mu
        #z = tf.nn.bias_add(z, b)
    if activation is not None:
        z = activation(z)
    info['activations'][name] = z

    if info.get('scale_summary'):
        with tf.name_scope('activation'):
            tf.summary.scalar('activation/' + name,
                              tf.sqrt(tf.reduce_mean(z**2)))

    return z
Example #8
def _pretrained_vgg_biases(name,
                           data,
                           info=DummyDict(),
                           pre_adjust_batch_norm=False):
    shape = None
    if name in data and '1' in data[name]:
        init_type = 'file'
        bias = data[name]['1'].copy()
        bn_name = 'batch_' + name
        if pre_adjust_batch_norm and bn_name in data:
            bn_data = data[bn_name]
            sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
            mu = bn_data['0'] / bn_data['2']
            bias = (bias - mu) / sigma
            init_type += ':batch-adjusted'
        shape = bias.shape
    else:
        init_type = 'init'
        bias = 0.0
    return bias
Example #9
def _pretrained_vgg_biases_initializer(name,
                                       data,
                                       info=DummyDict(),
                                       pre_adjust_batch_norm=False,
                                       prefix=''):
    shape = None
    if name in data and '1' in data[name]:
        init_type = 'file'
        bias = data[name]['1'].copy()
        bn_name = 'batch_' + name
        if pre_adjust_batch_norm and bn_name in data:
            bn_data = data[bn_name]
            sigma = np.sqrt(1e-5 + bn_data['1'] / bn_data['2'])
            mu = bn_data['0'] / bn_data['2']
            bias = (bias - mu) / sigma
            init_type += ':batch-adjusted'
        init = tf.constant_initializer(bias)
        shape = bias.shape
    else:
        init_type = 'init'
        init = tf.constant_initializer(0.0)
    info[prefix + ':' + name + '/biases'] = init_type
    return init, shape
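The pre_adjust_batch_norm branches in the AlexNet and VGG initializers (Examples #3, #4, #8 and #9) all apply the same folding: the stored blobs of the batch_* layers appear to be (mean sum, variance sum, count), so the running statistics are recovered by dividing by the count, and the preceding layer's weights and biases are rescaled so the explicit batch-norm step can be dropped at inference time. A small self-contained NumPy sketch of that folding, with the blob meanings taken as an assumption read off the snippets:

import numpy as np

def fold_batch_norm(W, b, bn_blobs, eps=1e-5):
    # bn_blobs is assumed to hold {'0': mean_sum, '1': var_sum, '2': count},
    # matching how the snippets above index the pretrained data.
    mu = bn_blobs['0'] / bn_blobs['2']
    sigma = np.sqrt(eps + bn_blobs['1'] / bn_blobs['2'])
    # (W x + b - mu) / sigma == (W / sigma) x + (b - mu) / sigma,
    # which is the adjustment the VGG/AlexNet initializers above perform.
    return W / sigma, (b - mu) / sigma

# Toy usage: weights of shape (inputs, outputs) as in the inner initializers.
W = np.ones((8, 4), dtype=np.float32)
b = np.zeros(4, dtype=np.float32)
blobs = {'0': np.full(4, 2.0), '1': np.full(4, 8.0), '2': 2.0}
W_adj, b_adj = fold_batch_norm(W, b, blobs)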
Example #10
def build_network(x,
                  info=DummyDict(),
                  parameters={},
                  phase_test=None,
                  convolutional=False,
                  final_layer=True,
                  activation=tf.nn.relu,
                  well_behaved_size=False,
                  global_step=None,
                  use_lrn=True,
                  prefix='',
                  use_dropout=True):

    # Set up AlexNet
    #conv = functools.partial(alex_conv, size=3, parameters=parameters,
    #info=info, pre_adjust_batch_norm=pre_adjust_batch_norm)
    #pool = functools.partial(ops.max_pool, info=info)
    if use_dropout:
        dropout = functools.partial(ops.dropout,
                                    phase_test=phase_test,
                                    info=info)
    else:

        def dropout(x, *args, **kwargs):
            return x

    def add_info(name, z, pre=None, info=DummyDict()):
        info['activations'][name] = z
        if info['config'].get('save_pre'):
            info['activations']['pre:' + name] = pre
        if info.get('scale_summary'):
            with tf.name_scope('activation'):
                tf.summary.scalar('activation/' + name,
                                  tf.sqrt(tf.reduce_mean(z**2)))

    if activation is None:
        activation = lambda x: x

    W_init = tf.contrib.layers.xavier_initializer_conv2d()
    W_init_fc = tf.contrib.layers.xavier_initializer()
    b_init = tf.constant_initializer(0.0)

    k_h = 11
    k_w = 11
    c_o = 96
    s_h = 4
    s_w = 4
    padding = 'VALID'
    if convolutional or well_behaved_size:
        padding = 'SAME'
    name = prefix + 'conv1'
    with tf.variable_scope(name):
        sh = [k_h, k_w, x.get_shape().as_list()[3], c_o]
        conv1W = tf.get_variable('weights',
                                 sh,
                                 dtype=tf.float32,
                                 initializer=W_init)
        conv1b = tf.get_variable('biases', [c_o],
                                 dtype=tf.float32,
                                 initializer=b_init)
    if 'weights' in info:
        info['weights'][name + ':weights'] = conv1W
        info['weights'][name + ':biases'] = conv1b
    conv1 = conv(x,
                 conv1W,
                 conv1b,
                 k_h,
                 k_w,
                 c_o,
                 s_h,
                 s_w,
                 padding=padding,
                 group=1)
    conv1 = batch_norm(conv1,
                       global_step=global_step,
                       phase_test=phase_test,
                       name=name)
    pre = conv1
    conv1 = activation(conv1)
    add_info(name, conv1, pre=pre, info=info)

    c_o_old = c_o

    #lrn1
    #lrn(2, 2e-05, 0.75, name='norm1')
    radius = 2
    alpha = 2e-05
    beta = 0.75
    bias = 1.0
    if use_lrn:
        lrn1 = tf.nn.local_response_normalization(conv1,
                                                  depth_radius=radius,
                                                  alpha=alpha,
                                                  beta=beta,
                                                  bias=bias)
        info['activations']['lrn1'] = lrn1
    else:
        lrn1 = conv1

    #maxpool1
    #max_pool(3, 3, 2, 2, padding='VALID', name='pool1')
    k_h = 3
    k_w = 3
    s_h = 2
    s_w = 2
    padding = 'VALID'
    if convolutional or well_behaved_size:
        padding = 'SAME'
    maxpool1 = tf.nn.max_pool(lrn1,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding)
    info['activations']['maxpool1'] = maxpool1

    #conv2
    #conv(5, 5, 256, 1, 1, group=2, name='conv2')
    k_h = 5
    k_w = 5
    c_o = 256
    s_h = 1
    s_w = 1
    group = 2
    #conv2W = tf.Variable(net_data["conv2"][0])
    #conv2b = tf.Variable(net_data["conv2"][1])
    name = prefix + 'conv2'
    with tf.variable_scope(name):
        sh = [k_h, k_w, c_o_old // group, c_o]
        conv2W = tf.get_variable('weights',
                                 sh,
                                 dtype=tf.float32,
                                 initializer=W_init)
        conv2b = tf.get_variable('biases', [c_o],
                                 dtype=tf.float32,
                                 initializer=b_init)
    if 'weights' in info:
        info['weights'][name + ':weights'] = conv2W
        info['weights'][name + ':biases'] = conv2b

    conv2 = conv(maxpool1,
                 conv2W,
                 conv2b,
                 k_h,
                 k_w,
                 c_o,
                 s_h,
                 s_w,
                 padding="SAME",
                 group=group)
    conv2 = batch_norm(conv2,
                       global_step=global_step,
                       phase_test=phase_test,
                       name=name)
    pre = conv2
    conv2 = activation(conv2)
    add_info(name, conv2, pre=pre, info=info)

    #lrn2
    #lrn(2, 2e-05, 0.75, name='norm2')
    radius = 2
    alpha = 2e-05
    beta = 0.75
    bias = 1.0
    if use_lrn:
        lrn2 = tf.nn.local_response_normalization(conv2,
                                                  depth_radius=radius,
                                                  alpha=alpha,
                                                  beta=beta,
                                                  bias=bias)
    else:
        lrn2 = conv2

    #maxpool2
    #max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
    k_h = 3
    k_w = 3
    s_h = 2
    s_w = 2
    padding = 'VALID'
    if convolutional or well_behaved_size:
        padding = 'SAME'
    maxpool2 = tf.nn.max_pool(lrn2,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding)
    info['activations'][prefix + 'pool2'] = maxpool2

    c_o_old = c_o

    k_h = 3
    k_w = 3
    c_o = 384
    s_h = 1
    s_w = 1
    group = 1
    name = prefix + 'conv3'
    with tf.variable_scope(name):
        sh = [k_h, k_w, c_o_old // group, c_o]
        conv3W = tf.get_variable('weights',
                                 sh,
                                 dtype=tf.float32,
                                 initializer=W_init)
        conv3b = tf.get_variable('biases', [c_o],
                                 dtype=tf.float32,
                                 initializer=b_init)
    if 'weights' in info:
        info['weights'][name + ':weights'] = conv3W
        info['weights'][name + ':biases'] = conv3b
    conv3 = conv(maxpool2,
                 conv3W,
                 conv3b,
                 k_h,
                 k_w,
                 c_o,
                 s_h,
                 s_w,
                 padding="SAME",
                 group=group)
    conv3 = batch_norm(conv3,
                       global_step=global_step,
                       phase_test=phase_test,
                       name=name)
    pre = conv3
    conv3 = activation(conv3)
    add_info(name, conv3, pre=pre, info=info)

    c_o_old = c_o

    k_h = 3
    k_w = 3
    c_o = 384
    s_h = 1
    s_w = 1
    group = 2
    name = prefix + 'conv4'
    with tf.variable_scope(name):
        sh = [k_h, k_w, c_o_old // group, c_o]
        conv4W = tf.get_variable('weights',
                                 sh,
                                 dtype=tf.float32,
                                 initializer=W_init)
        conv4b = tf.get_variable('biases', [c_o],
                                 dtype=tf.float32,
                                 initializer=b_init)
    if 'weights' in info:
        info['weights'][name + ':weights'] = conv4W
        info['weights'][name + ':biases'] = conv4b
    conv4 = conv(conv3,
                 conv4W,
                 conv4b,
                 k_h,
                 k_w,
                 c_o,
                 s_h,
                 s_w,
                 padding="SAME",
                 group=group)
    conv4 = batch_norm(conv4,
                       global_step=global_step,
                       phase_test=phase_test,
                       name=name)
    pre = conv4
    conv4 = activation(conv4)
    add_info(name, conv4, pre=pre, info=info)

    c_o_old = c_o

    k_h = 3
    k_w = 3
    c_o = 256
    s_h = 1
    s_w = 1
    group = 2
    name = prefix + 'conv5'
    with tf.variable_scope(name):
        sh = [k_h, k_w, c_o_old // group, c_o]
        conv5W = tf.get_variable('weights',
                                 sh,
                                 dtype=tf.float32,
                                 initializer=W_init)
        conv5b = tf.get_variable('biases', [c_o],
                                 dtype=tf.float32,
                                 initializer=b_init)
    if 'weights' in info:
        info['weights'][name + ':weights'] = conv5W
        info['weights'][name + ':biases'] = conv5b
    conv5 = conv(conv4,
                 conv5W,
                 conv5b,
                 k_h,
                 k_w,
                 c_o,
                 s_h,
                 s_w,
                 padding="SAME",
                 group=group)
    conv5 = batch_norm(conv5,
                       global_step=global_step,
                       phase_test=phase_test,
                       name=name)
    pre = conv5
    conv5 = activation(conv5)
    add_info(name, conv5, pre=pre, info=info)

    #maxpool5
    #max_pool(3, 3, 2, 2, padding='VALID', name='pool5')
    k_h = 3
    k_w = 3
    s_h = 2
    s_w = 2
    padding = 'VALID'
    if convolutional or well_behaved_size:
        padding = 'SAME'
    maxpool5 = tf.nn.max_pool(conv5,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding)
    info['activations']['pool5'] = maxpool5

    c_o_old = np.prod(maxpool5.get_shape().as_list()[1:])

    channels = maxpool5.get_shape().as_list()[-1]

    info['activations'][prefix + 'conv1'] = conv1
    info['activations'][prefix + 'conv2'] = conv2
    info['activations'][prefix + 'conv3'] = conv3
    info['activations'][prefix + 'conv4'] = conv4
    info['activations'][prefix + 'conv5'] = conv5

    # Set up weights and biases for fc6/fc7, so that if they are not used, they
    # are still set up (otherwise reuse=True will fail)
    name = prefix + 'fc6'
    with tf.variable_scope(name):
        c_o = 4096
        sh = [6, 6, channels, c_o]
        fc6W = tf.get_variable('weights',
                               sh,
                               dtype=tf.float32,
                               initializer=W_init_fc)
        fc6b = tf.get_variable('biases', [c_o],
                               dtype=tf.float32,
                               initializer=b_init)
    fc6_bn_mean, fc6_bn_var = init_batch_norm_vars(name, [c_o])
    if 'weights' in info:
        info['weights'][name + ':weights'] = fc6W
        info['weights'][name + ':biases'] = fc6b

    name = prefix + 'fc7'
    with tf.variable_scope(name):
        c_old_o = c_o
        c_o = 4096
        sh = [1, 1, c_old_o, c_o]
        fc7W = tf.get_variable('weights',
                               sh,
                               dtype=tf.float32,
                               initializer=W_init_fc)
        fc7b = tf.get_variable('biases', [c_o],
                               dtype=tf.float32,
                               initializer=b_init)
    fc7_bn_mean, fc7_bn_var = init_batch_norm_vars(name, [c_o])
    if 'weights' in info:
        info['weights'][name + ':weights'] = fc7W
        info['weights'][name + ':biases'] = fc7b

    if maxpool5.get_shape().as_list()[1:3] != [6, 6] and not convolutional:
        print('RETURNING PRE-FULLY-CONNECTED')
        return maxpool5

    if convolutional:
        name = prefix + 'fc6'
        #fc6 = tf.nn.relu_layer(tf.reshape(maxpool5, [-1, int(np.prod(maxpool5.get_shape()[1:]))]), fc6W, fc6b)
        #fc6 = tf.nn.relu_layer(tf.reshape(maxpool5, [-1, int(np.prod(maxpool5.get_shape()[1:]))]), fc6W, fc6b)
        conv6 = tf.nn.conv2d(maxpool5,
                             fc6W,
                             strides=[1, 1, 1, 1],
                             padding='SAME')
        fc6_in = tf.nn.bias_add(conv6, fc6b)
        fc6 = fc6_in
        fc6 = batch_norm(fc6,
                         global_step=global_step,
                         phase_test=phase_test,
                         name=name,
                         bn_mean=fc6_bn_mean,
                         bn_var=fc6_bn_var)
        pre = fc6
        fc6 = tf.nn.relu(fc6)
        add_info(name + ':nodropout', fc6, pre=fc6_in, info=info)
        fc6 = dropout(fc6, 0.5)
        add_info(name, fc6, pre=pre, info=info)

        c_o_old = c_o

        c_o = 4096

        name = prefix + 'fc7'
        #fc7 = tf.nn.relu_layer(fc6, fc7W, fc7b)
        conv7 = tf.nn.conv2d(fc6, fc7W, strides=[1, 1, 1, 1], padding='SAME')
        fc7_in = tf.nn.bias_add(conv7, fc7b)
        fc7 = fc7_in
        fc7 = batch_norm(fc7,
                         global_step=global_step,
                         phase_test=phase_test,
                         name=name,
                         bn_mean=fc7_bn_mean,
                         bn_var=fc7_bn_var)
        pre = fc7
        fc7 = tf.nn.relu(fc7)
        add_info(name + ':nodropout', fc7, pre=fc7_in, info=info)
        fc7 = dropout(fc7, 0.5)
        add_info(name, fc7, pre=pre, info=info)

        c_o_old = c_o

        if final_layer:
            c_o = 1000

            name = prefix + 'fc8'
            with tf.variable_scope(name):
                sh = [1, 1, c_o_old, c_o]
                fc8W = tf.get_variable('weights',
                                       sh,
                                       dtype=tf.float32,
                                       initializer=W_init_fc)
                fc8b = tf.get_variable('biases', [c_o],
                                       dtype=tf.float32,
                                       initializer=b_init)
            if 'weights' in info:
                info['weights'][name + ':weights'] = fc8W
                info['weights'][name + ':biases'] = fc8b
            #fc8 = tf.nn.xw_plus_b(fc7, fc8W, fc8b)
            conv8 = tf.nn.conv2d(fc7,
                                 fc8W,
                                 strides=[1, 1, 1, 1],
                                 padding='SAME')
            fc8 = tf.nn.bias_add(conv8, fc8b)
            info['activations'][name] = fc8
        else:
            fc8 = fc7

    else:
        sh_fc = [c_o_old, c_o]
        fc6W = tf.reshape(fc6W, sh_fc)

        name = prefix + 'fc6'
        maxpool5_flat = tf.reshape(
            maxpool5, [-1, int(np.prod(maxpool5.get_shape()[1:]))])
        #fc6 = tf.nn.relu_layer(maxpool5_flat, fc6W, fc6b, name=name)
        fc6_in = tf.nn.bias_add(tf.matmul(maxpool5_flat, fc6W), fc6b)
        fc6 = fc6_in
        fc6 = batch_norm(fc6,
                         global_step=global_step,
                         phase_test=phase_test,
                         name=name,
                         bn_mean=fc6_bn_mean,
                         bn_var=fc6_bn_var)
        pre = fc6
        fc6 = tf.nn.relu(fc6, name=name)
        add_info(name + ':nodropout', fc6, pre=fc6_in, info=info)
        fc6 = dropout(fc6, 0.5)
        add_info(name, fc6, pre=pre, info=info)

        c_o_old = c_o

        c_o = 4096

        name = prefix + 'fc7'
        fc7W = tf.squeeze(fc7W, [0, 1])

        #fc7 = tf.nn.relu_layer(fc6, fc7W, fc7b, name=name)
        fc7_in = tf.nn.bias_add(tf.matmul(fc6, fc7W), fc7b)
        fc7 = fc7_in
        fc7 = batch_norm(fc7,
                         global_step=global_step,
                         phase_test=phase_test,
                         name=name,
                         bn_mean=fc7_bn_mean,
                         bn_var=fc7_bn_var)
        pre = fc7
        fc7 = tf.nn.relu(fc7, name=name)
        add_info(name + ':nodropout', fc7, pre=fc7_in, info=info)
        fc7 = dropout(fc7, 0.5)
        add_info(name, fc7, pre=pre, info=info)

        c_o_old = c_o
        c_o = 1000

        if final_layer:
            name = prefix + 'fc8'
            with tf.variable_scope(name):
                sh = [c_o_old, c_o]
                fc8W = tf.get_variable('weights',
                                       sh,
                                       dtype=tf.float32,
                                       initializer=W_init_fc)
                fc8b = tf.get_variable('biases', [c_o],
                                       dtype=tf.float32,
                                       initializer=b_init)
            if 'weights' in info:
                info['weights'][name + ':weights'] = fc8W
                info['weights'][name + ':biases'] = fc8b
            fc8 = tf.nn.xw_plus_b(fc7, fc8W, fc8b)
            info['activations'][name] = fc8
        else:
            fc8 = fc7

    info['activations'][prefix + 'fc6'] = fc6
    info['activations'][prefix + 'fc7'] = fc7

    return fc8
Example #11
def decoder(self, z, channels=1, multiple=4, from_name=None, settings=DummyDict(), info=DummyDict()):
    raise NotImplementedError()
Example #12
def build_network(x,
                  info=DummyDict(),
                  parameters={},
                  hole=1,
                  phase_test=None,
                  convolutional=False,
                  final_layer=True,
                  activation=tf.nn.relu,
                  pre_adjust_batch_norm=False,
                  well_behaved_size=False,
                  use_lrn=True,
                  prefix='',
                  use_dropout=True):

    # Set up AlexNet
    #conv = functools.partial(alex_conv, size=3, parameters=parameters,
    #info=info, pre_adjust_batch_norm=pre_adjust_batch_norm)
    #pool = functools.partial(ops.max_pool, info=info)
    if use_dropout:
        dropout = functools.partial(ops.dropout,
                                    phase_test=phase_test,
                                    info=info)
    else:

        def dropout(x, *args, **kwargs):
            return x

    def add_info(name, z, pre=None, info=DummyDict()):
        info['activations'][name] = z
        if info['config'].get('save_pre'):
            info['activations']['pre:' + name] = pre
        if info.get('scale_summary'):
            with tf.name_scope('activation'):
                tf.summary.scalar('activation/' + name,
                                  tf.sqrt(tf.reduce_mean(z**2)))

    if activation is None:
        activation = lambda x: x

    W_init = tf.contrib.layers.variance_scaling_initializer()
    b_init = tf.constant_initializer(0.0)

    W_init, W_shape = _pretrained_alex_conv_weights_initializer(
        'conv1',
        parameters,
        info=info.get('init'),
        pre_adjust_batch_norm=pre_adjust_batch_norm,
        prefix=prefix)
    b_init, b_shape = _pretrained_alex_biases_initializer(
        'conv1',
        parameters,
        info=info.get('init'),
        pre_adjust_batch_norm=pre_adjust_batch_norm,
        prefix=prefix)

    k_h = 11
    k_w = 11
    c_o = 96
    s_h = 4
    s_w = 4
    padding = 'VALID'
    if convolutional or well_behaved_size:
        padding = 'SAME'
    #conv1W = tf.Variable(net_data["conv1"][0])
    #conv1b = tf.Variable(net_data["conv1"][1])
    name = prefix + 'conv1'
    with tf.variable_scope(name):
        sh = [k_h, k_w, x.get_shape().as_list()[3], c_o]
        assert W_shape is None or tuple(sh) == tuple(W_shape), (sh, W_shape)
        conv1W = tf.get_variable('weights',
                                 sh,
                                 dtype=tf.float32,
                                 initializer=W_init)
        conv1b = tf.get_variable('biases', [c_o],
                                 dtype=tf.float32,
                                 initializer=b_init)
    if 'weights' in info:
        info['weights'][name + ':weights'] = conv1W
        info['weights'][name + ':biases'] = conv1b
    conv1_in = conv(x,
                    conv1W,
                    conv1b,
                    k_h,
                    k_w,
                    c_o,
                    s_h,
                    s_w,
                    padding=padding,
                    group=1)
    conv1 = activation(conv1_in)
    add_info(name, conv1, pre=conv1_in, info=info)

    c_o_old = c_o

    #lrn1
    #lrn(2, 2e-05, 0.75, name='norm1')
    radius = 2
    alpha = 2e-05
    beta = 0.75
    bias = 1.0
    if use_lrn:
        lrn1 = tf.nn.local_response_normalization(conv1,
                                                  depth_radius=radius,
                                                  alpha=alpha,
                                                  beta=beta,
                                                  bias=bias)
        info['activations']['lrn1'] = lrn1
    else:
        lrn1 = conv1

    #maxpool1
    #max_pool(3, 3, 2, 2, padding='VALID', name='pool1')
    k_h = 3
    k_w = 3
    s_h = 2
    s_w = 2
    padding = 'VALID'
    if convolutional or well_behaved_size:
        padding = 'SAME'
    maxpool1 = tf.nn.max_pool(lrn1,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding)
    info['activations']['maxpool1'] = maxpool1

    W_init, W_shape = _pretrained_alex_conv_weights_initializer(
        'conv2',
        parameters,
        info=info.get('init'),
        pre_adjust_batch_norm=pre_adjust_batch_norm,
        prefix=prefix)
    b_init, b_shape = _pretrained_alex_biases_initializer(
        'conv2',
        parameters,
        info=info.get('init'),
        pre_adjust_batch_norm=pre_adjust_batch_norm,
        prefix=prefix)
    #conv2
    #conv(5, 5, 256, 1, 1, group=2, name='conv2')
    k_h = 5
    k_w = 5
    c_o = 256
    s_h = 1
    s_w = 1
    group = 2
    #conv2W = tf.Variable(net_data["conv2"][0])
    #conv2b = tf.Variable(net_data["conv2"][1])
    name = prefix + 'conv2'
    with tf.variable_scope(name):
        sh = [k_h, k_w, c_o_old // group, c_o]
        assert W_shape is None or tuple(sh) == tuple(W_shape), (sh, W_shape)
        conv2W = tf.get_variable('weights',
                                 sh,
                                 dtype=tf.float32,
                                 initializer=W_init)
        conv2b = tf.get_variable('biases', [c_o],
                                 dtype=tf.float32,
                                 initializer=b_init)
    if 'weights' in info:
        info['weights'][name + ':weights'] = conv2W
        info['weights'][name + ':biases'] = conv2b

    conv2_in = conv(maxpool1,
                    conv2W,
                    conv2b,
                    k_h,
                    k_w,
                    c_o,
                    s_h,
                    s_w,
                    padding="SAME",
                    group=group)
    conv2 = activation(conv2_in)
    add_info(name, conv2, pre=conv2_in, info=info)

    #lrn2
    #lrn(2, 2e-05, 0.75, name='norm2')
    radius = 2
    alpha = 2e-05
    beta = 0.75
    bias = 1.0
    if use_lrn:
        lrn2 = tf.nn.local_response_normalization(conv2,
                                                  depth_radius=radius,
                                                  alpha=alpha,
                                                  beta=beta,
                                                  bias=bias)
    else:
        lrn2 = conv2

    #maxpool2
    #max_pool(3, 3, 2, 2, padding='VALID', name='pool2')
    k_h = 3
    k_w = 3
    s_h = 2
    s_w = 2
    padding = 'VALID'
    if convolutional or well_behaved_size:
        padding = 'SAME'
    maxpool2 = tf.nn.max_pool(lrn2,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding)
    info['activations'][prefix + 'pool2'] = maxpool2

    c_o_old = c_o

    #conv3
    #conv(3, 3, 384, 1, 1, name='conv3')
    k_h = 3
    k_w = 3
    c_o = 384
    s_h = 1
    s_w = 1
    group = 1
    #conv3W = tf.Variable(net_data["conv3"][0])
    #conv3b = tf.Variable(net_data["conv3"][1])
    W_init, W_shape = _pretrained_alex_conv_weights_initializer(
        'conv3',
        parameters,
        info=info.get('init'),
        pre_adjust_batch_norm=pre_adjust_batch_norm,
        prefix=prefix)
    b_init, b_shape = _pretrained_alex_biases_initializer(
        'conv3',
        parameters,
        info=info.get('init'),
        pre_adjust_batch_norm=pre_adjust_batch_norm,
        prefix=prefix)
    name = prefix + 'conv3'
    with tf.variable_scope(name):
        sh = [k_h, k_w, c_o_old // group, c_o]
        assert W_shape is None or tuple(sh) == tuple(W_shape), (sh, W_shape)
        conv3W = tf.get_variable('weights',
                                 sh,
                                 dtype=tf.float32,
                                 initializer=W_init)
        conv3b = tf.get_variable('biases', [c_o],
                                 dtype=tf.float32,
                                 initializer=b_init)
    if 'weights' in info:
        info['weights'][name + ':weights'] = conv3W
        info['weights'][name + ':biases'] = conv3b
    conv3_in = conv(maxpool2,
                    conv3W,
                    conv3b,
                    k_h,
                    k_w,
                    c_o,
                    s_h,
                    s_w,
                    padding="SAME",
                    group=group)
    conv3 = activation(conv3_in)
    add_info(name, conv3, pre=conv3_in, info=info)

    c_o_old = c_o

    #conv4
    #conv(3, 3, 384, 1, 1, group=2, name='conv4')
    k_h = 3
    k_w = 3
    c_o = 384
    s_h = 1
    s_w = 1
    group = 2
    #conv4W = tf.Variable(net_data["conv4"][0])
    #conv4b = tf.Variable(net_data["conv4"][1])
    W_init, W_shape = _pretrained_alex_conv_weights_initializer(
        'conv4',
        parameters,
        info=info.get('init'),
        pre_adjust_batch_norm=pre_adjust_batch_norm,
        prefix=prefix)
    b_init, b_shape = _pretrained_alex_biases_initializer(
        'conv4',
        parameters,
        info=info.get('init'),
        pre_adjust_batch_norm=pre_adjust_batch_norm,
        prefix=prefix)
    name = prefix + 'conv4'
    with tf.variable_scope(name):
        sh = [k_h, k_w, c_o_old // group, c_o]
        assert W_shape is None or tuple(sh) == tuple(W_shape), (sh, W_shape)
        conv4W = tf.get_variable('weights',
                                 sh,
                                 dtype=tf.float32,
                                 initializer=W_init)
        conv4b = tf.get_variable('biases', [c_o],
                                 dtype=tf.float32,
                                 initializer=b_init)
    if 'weights' in info:
        info['weights'][name + ':weights'] = conv4W
        info['weights'][name + ':biases'] = conv4b
    conv4_in = conv(conv3,
                    conv4W,
                    conv4b,
                    k_h,
                    k_w,
                    c_o,
                    s_h,
                    s_w,
                    padding="SAME",
                    group=group)
    conv4 = activation(conv4_in)
    add_info(name, conv4, pre=conv4_in, info=info)

    c_o_old = c_o

    #conv5
    #conv(3, 3, 256, 1, 1, group=2, name='conv5')
    k_h = 3
    k_w = 3
    c_o = 256
    s_h = 1
    s_w = 1
    group = 2
    #conv5W = tf.Variable(net_data["conv5"][0])
    #conv5b = tf.Variable(net_data["conv5"][1])
    W_init, W_shape = _pretrained_alex_conv_weights_initializer(
        'conv5',
        parameters,
        info=info.get('init'),
        pre_adjust_batch_norm=pre_adjust_batch_norm,
        prefix=prefix)
    b_init, b_shape = _pretrained_alex_biases_initializer(
        'conv5',
        parameters,
        info=info.get('init'),
        pre_adjust_batch_norm=pre_adjust_batch_norm,
        prefix=prefix)
    name = prefix + 'conv5'
    with tf.variable_scope(name):
        sh = [k_h, k_w, c_o_old // group, c_o]
        assert W_shape is None or tuple(sh) == tuple(W_shape), (sh, W_shape)
        conv5W = tf.get_variable('weights',
                                 sh,
                                 dtype=tf.float32,
                                 initializer=W_init)
        conv5b = tf.get_variable('biases', [c_o],
                                 dtype=tf.float32,
                                 initializer=b_init)
    if 'weights' in info:
        info['weights'][name + ':weights'] = conv5W
        info['weights'][name + ':biases'] = conv5b
    conv5_in = conv(conv4,
                    conv5W,
                    conv5b,
                    k_h,
                    k_w,
                    c_o,
                    s_h,
                    s_w,
                    padding="SAME",
                    group=group)
    conv5 = activation(conv5_in)
    add_info(name, conv5, pre=conv5_in, info=info)

    #maxpool5
    #max_pool(3, 3, 2, 2, padding='VALID', name='pool5')
    k_h = 3
    k_w = 3
    s_h = 2
    s_w = 2
    padding = 'VALID'
    if convolutional or well_behaved_size:
        padding = 'SAME'
    maxpool5 = tf.nn.max_pool(conv5,
                              ksize=[1, k_h, k_w, 1],
                              strides=[1, s_h, s_w, 1],
                              padding=padding)
    info['activations']['pool5'] = maxpool5

    c_o_old = np.prod(maxpool5.get_shape().as_list()[1:])

    c_o = 4096
    channels = maxpool5.get_shape().as_list()[-1]

    if convolutional:
        W_init, W_shape = _pretrained_alex_conv_weights_initializer(
            'fc6',
            parameters,
            info=info.get('init'),
            pre_adjust_batch_norm=pre_adjust_batch_norm,
            prefix=prefix)
        b_init, b_shape = _pretrained_alex_biases_initializer(
            'fc6',
            parameters,
            info=info.get('init'),
            pre_adjust_batch_norm=pre_adjust_batch_norm,
            prefix=prefix)
        #fc6
        #fc(4096, name='fc6')
        #fc6W = tf.Variable(net_data["fc6"][0])
        #fc6b = tf.Variable(net_data["fc6"][1])
        name = prefix + 'fc6'
        with tf.variable_scope(name):
            sh = [6, 6, channels, c_o]
            assert W_shape is None or tuple(sh) == tuple(W_shape), (sh,
                                                                    W_shape)
            fc6W = tf.get_variable('weights',
                                   sh,
                                   dtype=tf.float32,
                                   initializer=W_init)
            fc6b = tf.get_variable('biases', [c_o],
                                   dtype=tf.float32,
                                   initializer=b_init)
        if 'weights' in info:
            info['weights'][name + ':weights'] = fc6W
            info['weights'][name + ':biases'] = fc6b
        #fc6 = tf.nn.relu_layer(tf.reshape(maxpool5, [-1, int(np.prod(maxpool5.get_shape()[1:]))]), fc6W, fc6b)
        #fc6 = tf.nn.relu_layer(tf.reshape(maxpool5, [-1, int(np.prod(maxpool5.get_shape()[1:]))]), fc6W, fc6b)
        conv6 = tf.nn.conv2d(maxpool5,
                             fc6W,
                             strides=[1, 1, 1, 1],
                             padding='SAME')
        fc6_in = tf.nn.bias_add(conv6, fc6b)
        fc6 = tf.nn.relu(fc6_in)
        add_info(name + ':nodropout', fc6, pre=fc6_in, info=info)
        fc6 = dropout(fc6, 0.5)
        add_info(name, fc6, pre=fc6_in, info=info)

        c_o_old = c_o

        c_o = 4096

        W_init, W_shape = _pretrained_alex_conv_weights_initializer(
            'fc7',
            parameters,
            info=info.get('init'),
            pre_adjust_batch_norm=pre_adjust_batch_norm,
            prefix=prefix)
        b_init, b_shape = _pretrained_alex_biases_initializer(
            'fc7',
            parameters,
            info=info.get('init'),
            pre_adjust_batch_norm=pre_adjust_batch_norm,
            prefix=prefix)
        #fc7
        #fc(4096, name='fc7')
        #fc7W = tf.Variable(net_data["fc7"][0])
        #fc7b = tf.Variable(net_data["fc7"][1])
        name = prefix + 'fc7'
        with tf.variable_scope(name):
            sh = [1, 1, c_o_old, c_o]
            assert W_shape is None or tuple(sh) == tuple(W_shape), (sh,
                                                                    W_shape)
            fc7W = tf.get_variable('weights',
                                   sh,
                                   dtype=tf.float32,
                                   initializer=W_init)
            fc7b = tf.get_variable('biases', [c_o],
                                   dtype=tf.float32,
                                   initializer=b_init)
        if 'weights' in info:
            info['weights'][name + ':weights'] = fc7W
            info['weights'][name + ':biases'] = fc7b
        #fc7 = tf.nn.relu_layer(fc6, fc7W, fc7b)
        conv7 = tf.nn.conv2d(fc6, fc7W, strides=[1, 1, 1, 1], padding='SAME')
        fc7_in = tf.nn.bias_add(conv7, fc7b)
        fc7 = tf.nn.relu(fc7_in)
        add_info(name + ':nodropout', fc7, pre=fc7_in, info=info)
        fc7 = dropout(fc7, 0.5)
        add_info(name, fc7, pre=fc7_in, info=info)

        c_o_old = c_o

        if final_layer:
            c_o = 1000

            W_init, W_shape = _pretrained_alex_conv_weights_initializer(
                'fc8',
                parameters,
                info=info.get('init'),
                pre_adjust_batch_norm=pre_adjust_batch_norm,
                prefix=prefix)
            b_init, b_shape = _pretrained_alex_biases_initializer(
                'fc8',
                parameters,
                info=info.get('init'),
                pre_adjust_batch_norm=pre_adjust_batch_norm,
                prefix=prefix)
            name = prefix + 'fc8'
            with tf.variable_scope(name):
                sh = [1, 1, c_o_old, c_o]
                assert W_shape is None or tuple(sh) == tuple(W_shape), (
                    sh, W_shape)
                fc8W = tf.get_variable('weights',
                                       sh,
                                       dtype=tf.float32,
                                       initializer=W_init)
                fc8b = tf.get_variable('biases', [c_o],
                                       dtype=tf.float32,
                                       initializer=b_init)
            if 'weights' in info:
                info['weights'][name + ':weights'] = fc8W
                info['weights'][name + ':biases'] = fc8b
            #fc8 = tf.nn.xw_plus_b(fc7, fc8W, fc8b)
            conv8 = tf.nn.conv2d(fc7,
                                 fc8W,
                                 strides=[1, 1, 1, 1],
                                 padding='SAME')
            fc8 = tf.nn.bias_add(conv8, fc8b)
            info['activations'][name] = fc8
        else:
            fc8 = fc7

    else:
        #fc6
        #fc(4096, name='fc6')
        #fc6W = tf.Variable(net_data["fc6"][0])
        #fc6b = tf.Variable(net_data["fc6"][1])
        W_init, W_shape = _pretrained_alex_inner_weights_initializer(
            'fc6',
            parameters,
            info=info.get('init'),
            pre_adjust_batch_norm=pre_adjust_batch_norm,
            prefix=prefix)
        b_init, b_shape = _pretrained_alex_biases_initializer(
            'fc6',
            parameters,
            info=info.get('init'),
            pre_adjust_batch_norm=pre_adjust_batch_norm,
            prefix=prefix)
        name = prefix + 'fc6'
        with tf.variable_scope(name):
            sh = [6, 6, channels, c_o]
            sh_fc = [c_o_old, c_o]

            assert W_shape is None or (tuple(sh) == tuple(W_shape)
                                       or tuple(W_shape)
                                       == (256 * 6 * 6, 4096)), (sh, W_shape)
            fc6W_conv = tf.get_variable('weights',
                                        sh,
                                        dtype=tf.float32,
                                        initializer=W_init)

            fc6W = tf.reshape(fc6W_conv, sh_fc)
            fc6b = tf.get_variable('biases', [c_o],
                                   dtype=tf.float32,
                                   initializer=b_init)
        if 'weights' in info:
            info['weights'][name + ':weights'] = fc6W
            info['weights'][name + ':biases'] = fc6b
        maxpool5_flat = tf.reshape(
            maxpool5, [-1, int(np.prod(maxpool5.get_shape()[1:]))])
        #fc6 = tf.nn.relu_layer(maxpool5_flat, fc6W, fc6b, name=name)
        fc6_in = tf.nn.bias_add(tf.matmul(maxpool5_flat, fc6W), fc6b)
        fc6 = tf.nn.relu(fc6_in, name=name)
        add_info(name + ':nodropout', fc6, pre=fc6_in, info=info)
        fc6 = dropout(fc6, 0.5)
        add_info(name, fc6, pre=fc6_in, info=info)

        c_o_old = c_o

        c_o = 4096

        W_init, W_shape = _pretrained_alex_inner_weights_initializer(
            'fc7',
            parameters,
            info=info.get('init'),
            pre_adjust_batch_norm=pre_adjust_batch_norm,
            prefix=prefix)
        b_init, b_shape = _pretrained_alex_biases_initializer(
            'fc7',
            parameters,
            info=info.get('init'),
            pre_adjust_batch_norm=pre_adjust_batch_norm,
            prefix=prefix)
        #fc7
        #fc(4096, name='fc7')
        #fc7W = tf.Variable(net_data["fc7"][0])
        #fc7b = tf.Variable(net_data["fc7"][1])
        name = prefix + 'fc7'
        with tf.variable_scope(name):
            sh = [1, 1, c_o_old, c_o]
            fc7W_conv = tf.get_variable('weights',
                                        sh,
                                        dtype=tf.float32,
                                        initializer=W_init)

            fc7W = tf.squeeze(fc7W_conv, [0, 1])
            fc7b = tf.get_variable('biases', [c_o],
                                   dtype=tf.float32,
                                   initializer=b_init)
        if 'weights' in info:
            info['weights'][name + ':weights'] = fc7W
            info['weights'][name + ':biases'] = fc7b
        #fc7 = tf.nn.relu_layer(fc6, fc7W, fc7b, name=name)
        fc7_in = tf.nn.bias_add(tf.matmul(fc6, fc7W), fc7b)
        fc7 = tf.nn.relu(fc7_in, name=name)
        add_info(name + ':nodropout', fc7, pre=fc7_in, info=info)
        fc7 = dropout(fc7, 0.5)
        add_info(name, fc7, pre=fc7_in, info=info)

        c_o_old = c_o
        c_o = 1000

        if final_layer:
            W_init, W_shape = _pretrained_alex_inner_weights_initializer(
                'fc8',
                parameters,
                info=info.get('init'),
                pre_adjust_batch_norm=pre_adjust_batch_norm,
                prefix=prefix)
            b_init, b_shape = _pretrained_alex_biases_initializer(
                'fc8',
                parameters,
                info=info.get('init'),
                pre_adjust_batch_norm=pre_adjust_batch_norm,
                prefix=prefix)
            #fc8
            #fc(1000, relu=False, name='fc8')
            #fc8W = tf.Variable(net_data["fc8"][0])
            #fc8b = tf.Variable(net_data["fc8"][1])
            name = prefix + 'fc8'
            with tf.variable_scope(name):
                sh = [c_o_old, c_o]
                assert W_shape is None or tuple(sh) == tuple(W_shape), (
                    sh, W_shape)
                fc8W = tf.get_variable('weights',
                                       sh,
                                       dtype=tf.float32,
                                       initializer=W_init)
                fc8b = tf.get_variable('biases', [c_o],
                                       dtype=tf.float32,
                                       initializer=b_init)
            if 'weights' in info:
                info['weights'][name + ':weights'] = fc8W
                info['weights'][name + ':biases'] = fc8b
            fc8 = tf.nn.xw_plus_b(fc7, fc8W, fc8b)
            info['activations'][name] = fc8
        else:
            fc8 = fc7

    info['activations'][prefix + 'conv1'] = conv1
    info['activations'][prefix + 'conv2'] = conv2
    info['activations'][prefix + 'conv3'] = conv3
    info['activations'][prefix + 'conv4'] = conv4
    info['activations'][prefix + 'conv5'] = conv5
    info['activations'][prefix + 'fc6'] = fc6
    info['activations'][prefix + 'fc7'] = fc7

    return fc8
Example #13
def build_network(x,
                  info=DummyDict(),
                  parameters={},
                  hole=1,
                  phase_test=None,
                  convolutional=False,
                  final_layer=True,
                  batch_norm=False,
                  squeezed=False,
                  pre_adjust_batch_norm=False,
                  prefix='',
                  num_features_mult=1.0,
                  use_dropout=True,
                  activation=tf.nn.relu,
                  limit=np.inf,
                  global_step=None):
    def num(f):
        return int(f * num_features_mult)

    def conv(z, ch, **kwargs):
        if 'parameter_name' not in kwargs:
            kwargs['parameter_name'] = kwargs['name']
        kwargs['name'] = prefix + kwargs['name']
        kwargs['size'] = kwargs.get('size', 3)
        kwargs['parameters'] = kwargs.get('parameters', parameters)
        kwargs['info'] = kwargs.get('info', info)
        kwargs['pre_adjust_batch_norm'] = kwargs.get('pre_adjust_batch_norm',
                                                     pre_adjust_batch_norm)
        kwargs['activation'] = kwargs.get('activation', activation)
        kwargs['prefix'] = prefix
        kwargs['batch_norm'] = kwargs.get('batch_norm', batch_norm)
        kwargs['phase_test'] = kwargs.get('phase_test', phase_test)
        kwargs['global_step'] = kwargs.get('global_step', global_step)
        if 'previous' in kwargs:
            kwargs['previous'] = prefix + kwargs['previous']
        return vgg_conv(z, num(ch), **kwargs)

    def inner(z, ch, **kwargs):
        if 'parameter_name' not in kwargs:
            kwargs['parameter_name'] = kwargs['name']
        kwargs['name'] = prefix + kwargs['name']
        kwargs['parameters'] = kwargs.get('parameters', parameters)
        kwargs['prefix'] = prefix
        if 'previous' in kwargs:
            kwargs['previous'] = prefix + kwargs['previous']
        return vgg_inner(z, ch, **kwargs)

    #pool = functools.partial(ops.max_pool, info=info)
    def pool(*args, **kwargs):
        kwargs['name'] = prefix + kwargs['name']
        kwargs['info'] = kwargs.get('info', info)
        return ops.max_pool(*args, **kwargs)

    def dropout(z, rate, **kwargs):
        kwargs['phase_test'] = kwargs.get('phase_test', phase_test)
        kwargs['info'] = kwargs.get('info', info)
        kwargs['name'] = prefix + kwargs['name']
        if use_dropout:
            return ops.dropout(z, rate, **kwargs)
        else:
            return z

    #dropout = functools.partial(ops.dropout, phase_test=phase_test, info=info)

    z = x
    if hole == 4:
        apool = functools.partial(ops.atrous_avg_pool,
                                  info=info,
                                  padding='SAME')
        assert convolutional
        #aconv = functools.partial(_atrous_conv, size=3, parameters=parameters,
        #info=info, pre_adjust_batch_norm=pre_adjust_batch_norm)
        z = conv(z, 64, name='conv1_1')
        z = conv(z, 64, name='conv1_2', previous='conv1_1')
        z = pool(z, 2, name='pool1')
        z = conv(z, 128, name='conv2_1', previous='conv1_2')
        z = conv(z, 128, name='conv2_2', previous='conv2_1')
        z = pool(z, 2, name='pool2')
        z = conv(z, 256, name='conv3_1', previous='conv2_2')
        z = conv(z, 256, name='conv3_2', previous='conv3_1')
        z = conv(z, 256, name='conv3_3', previous='conv3_2')
        z = pool(z, 2, name='pool3')
        z = conv(z, 512, name='conv4_1', previous='conv3_3')
        z = conv(z, 512, name='conv4_2', previous='conv4_1')
        z = conv(z, 512, name='conv4_3', previous='conv4_2')
        z = pool(z, 2, stride=1, name='pool4')
        z = conv(z, 512, hole=2, name='conv5_1', previous='conv4_3')
        z = conv(z, 512, hole=2, name='conv5_2', previous='conv5_1')
        z = conv(z, 512, hole=2, name='conv5_3', previous='conv5_2')
        z = apool(z, 2, rate=2, name='pool5')
        z = conv(z,
                 4096,
                 size=7,
                 hole=4,
                 padding='SAME',
                 name='fc6_pre',
                 parameter_name='fc6',
                 previous='conv5_3')
        z = dropout(z, 0.5, name='fc6')
        z = conv(z,
                 4096,
                 size=1,
                 name='fc7_pre',
                 parameter_name='fc7',
                 previous='fc6')
        z = dropout(z, 0.5, name='fc7')
    else:
        z = conv(z, 64, name='conv1_1')
        if limit == 1: return z
        z = conv(z, 64, name='conv1_2', previous='conv1_1')
        z = pool(z, 2, name='pool1')
        if limit == 2: return z
        z = conv(z, 128, name='conv2_1', previous='conv1_2')
        if limit == 3: return z
        z = conv(z, 128, name='conv2_2', previous='conv2_1')
        z = pool(z, 2, name='pool2')
        if limit == 4: return z
        z = conv(z, 256, name='conv3_1', previous='conv2_2')
        if limit == 5: return z
        z = conv(z, 256, name='conv3_2', previous='conv3_1')
        if limit == 6: return z
        z = conv(z, 256, name='conv3_3', previous='conv3_2')
        z = pool(z, 2, name='pool3')
        if limit == 7: return z
        z = conv(z, 512, name='conv4_1', previous='conv3_3')
        if limit == 8: return z
        z = conv(z, 512, name='conv4_2', previous='conv4_1')
        if limit == 9: return z
        z = conv(z, 512, name='conv4_3', previous='conv4_2')
        z = pool(z, 2, name='pool4')
        if limit == 10: return z
        z = conv(z, 512, name='conv5_1', previous='conv4_3')
        if limit == 11: return z
        z = conv(z, 512, name='conv5_2', previous='conv5_1')
        if limit == 12: return z
        z = conv(z, 512, name='conv5_3', previous='conv5_2')
        z = pool(z, 2, name='pool5')
        if limit == 13: return z
        z = conv(z,
                 4096,
                 size=7,
                 padding='VALID' if not convolutional else 'SAME',
                 name='fc6_pre',
                 parameter_name='fc6',
                 previous='conv5_3',
                 squeeze=not convolutional)
        z = dropout(z, 0.5, name='fc6')
        if limit == 14: return z
        z = conv(z,
                 4096,
                 size=1,
                 name='fc7_pre',
                 parameter_name='fc7',
                 previous='fc6',
                 squeeze=not convolutional)
        z = dropout(z, 0.5, name='fc7')
        if not convolutional and squeezed:
            # Make intermediate activations non-convolutional
            for l in ['fc6', 'fc7']:
                info['activations'][l] = tf.squeeze(info['activations'][l],
                                                    [1, 2])
            z = info['activations']['fc7']

    if final_layer:
        if convolutional:
            z = conv(z,
                     1000,
                     info=info,
                     size=1,
                     parameters=parameters,
                     activation=None,
                     name='fc8')
        else:
            z = inner(z,
                      1000,
                      info=info,
                      parameters=parameters,
                      activation=None,
                      name='fc8')

    return z
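# A minimal usage sketch for the VGG-16 builder that ends above. The builder's
# name is not visible in this excerpt, so `build_network` below is an assumed
# name; the keyword arguments mirror how the body uses them.
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 224, 224, 3])
phase_test = tf.placeholder(tf.bool, [])
info = {'activations': {}, 'weights': {}, 'config': {}, 'init': {}}
parameters = {}  # usually a dict of pretrained VGG-16 weights keyed by layer name

# z = build_network(x, info=info, parameters=parameters, phase_test=phase_test,
#                   convolutional=False, final_layer=True)
# `limit` stops the network early (e.g. limit=7 returns pool3), and
# info['activations'] collects every named intermediate tensor.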
Exemple #14
0
def vgg_conv(x,
             channels,
             size=3,
             padding='SAME',
             stride=1,
             hole=1,
             batch_norm=False,
             phase_test=None,
             activation=tf.nn.relu,
             name=None,
             parameter_name=None,
             summarize_scale=False,
             info=DummyDict(),
             parameters={},
             pre_adjust_batch_norm=False,
             edge_bias_fix=False,
             previous=None,
             prefix='',
             use_bias=True,
             scope=None,
             global_step=None,
             squeeze=False):
    if parameter_name is None:
        parameter_name = name
    if scope is None:
        scope = name

    def maybe_squeeze(z):
        if squeeze:
            return tf.squeeze(z, [1, 2])
        else:
            return z

    with tf.name_scope(name):
        features = int(x.get_shape()[3])
        f = channels
        shape = [size, size, features, f]

        W_init, W_shape = _pretrained_vgg_conv_weights_initializer(
            parameter_name,
            parameters,
            info=info.get('init'),
            pre_adjust_batch_norm=pre_adjust_batch_norm,
            prefix=prefix)
        b_init, b_shape = _pretrained_vgg_biases_initializer(
            parameter_name,
            parameters,
            info=info.get('init'),
            pre_adjust_batch_norm=pre_adjust_batch_norm,
            prefix=prefix)

        assert W_shape is None or tuple(W_shape) == tuple(
            shape
        ), "Incorrect weights shape for {} (file: {}, spec: {})".format(
            name, W_shape, shape)
        assert b_shape is None or tuple(b_shape) == (
            f, ), "Incorrect bias shape for {} (file: {}, spec: {})".format(
                name, b_shape, (f, ))

        with tf.variable_scope(scope):
            W = tf.get_variable('weights',
                                shape,
                                dtype=tf.float32,
                                initializer=W_init)
            b = tf.get_variable('biases', [f],
                                dtype=tf.float32,
                                initializer=b_init)

        if hole == 1:
            conv0 = tf.nn.conv2d(x,
                                 W,
                                 strides=[1, stride, stride, 1],
                                 padding=padding)
        else:
            assert stride == 1
            conv0 = tf.nn.atrous_conv2d(x, W, rate=hole, padding=padding)

        #h1 = tf.nn.bias_add(conv0, b)
        if use_bias:
            h1 = tf.nn.bias_add(conv0, b)
        else:
            h1 = conv0

        if batch_norm:
            assert phase_test is not None, "phase_test required for batch norm"
            mm, vv = tf.nn.moments(h1, [0, 1, 2], name='mommy')
            beta = tf.Variable(tf.constant(0.0, shape=[f]),
                               name='beta',
                               trainable=True)
            gamma = tf.Variable(tf.constant(1.0, shape=[f]),
                                name='gamma',
                                trainable=True)
            #ema = tf.train.ExponentialMovingAverage(decay=0.999)
            ema = ExponentialMovingAverageExtended(decay=0.999,
                                                   value=[0.0, 1.0],
                                                   num_updates=global_step)

            def mean_var_train():
                ema_apply_op = ema.apply([mm, vv])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(ema.average(mm)), tf.identity(
                        ema.average(vv))
                    #return tf.identity(mm), tf.identity(vv)

            def mean_var_test():
                return ema.average(mm), ema.average(vv)

            # phase_test is False during training: update the moving averages
            # from the batch moments; at test time use the stored averages.
            mean, var = tf.cond(~phase_test, mean_var_train, mean_var_test)

            h2 = tf.nn.batch_normalization(h1, mean, var, beta, gamma, 1e-3)
            z = h2
        else:
            z = h1

        if info['config'].get('save_pre'):
            info['activations']['pre:' + name] = maybe_squeeze(z)

        if activation is not None:
            z = activation(z)

    if info.get('scale_summary'):
        with tf.name_scope('activation'):
            tf.summary.scalar('activation/' + name,
                              tf.sqrt(tf.reduce_mean(z**2)))

    info['activations'][name] = maybe_squeeze(z)
    if 'weights' in info:
        info['weights'][name + ':weights'] = W
        info['weights'][name + ':biases'] = b
    return z
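# A short, self-contained sketch of calling vgg_conv on its own (assuming the
# DummyDict and pretrained-initializer helpers above are in the same module).
# With an empty `parameters` dict the initializers fall back to random
# initialization and record 'init' rather than 'file' in info['init'].
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 224, 224, 3])
info = {'activations': {}, 'weights': {}, 'config': {}, 'init': {}}

z = vgg_conv(x, 64, size=3, name='conv1_1', parameters={}, info=info)

print(info['activations']['conv1_1'])     # post-ReLU output tensor
print(info['weights']['conv1_1:weights'])  # the created weight variable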
Exemple #15
0
def resnet_conv(x,
                channels,
                size=3,
                padding='SAME',
                stride=1,
                batch_norm=False,
                phase_test=None,
                activation=tf.nn.relu,
                name=None,
                parameter_name=None,
                bn_name=None,
                scale_name=None,
                summarize_scale=False,
                info=DummyDict(),
                parameters={},
                pre_adjust_batch_norm=False,
                iteration=None):
    if parameter_name is None:
        parameter_name = name
    if scale_name is None:
        scale_name = parameter_name
    with tf.name_scope(name):
        features = int(x.get_shape()[3])
        f = channels
        shape = [size, size, features, f]

        W_init, W_shape = _pretrained_resnet_conv_weights_initializer(
            parameter_name, parameters, info=info.get('init'), full_info=info)

        #b_init, b_shape = _pretrained_resnet_biases_initializer(scale_name, parameters,
        #info=info.get('init'),
        #full_info=info,
        #pre_adjust_batch_norm=pre_adjust_batch_norm,
        #bn_name=bn_name)

        assert W_shape is None or tuple(W_shape) == tuple(
            shape
        ), "Incorrect weights shape for {} (file: {}, spec: {})".format(
            name, W_shape, shape)

        with tf.variable_scope(name):
            W = tf.get_variable('weights',
                                shape,
                                dtype=tf.float32,
                                initializer=W_init)

        raw_conv0 = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding)
        #conv0 = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
        if stride > 1:
            conv0 = tf.strided_slice(raw_conv0, [0, 0, 0, 0],
                                     raw_conv0.get_shape(),
                                     [1, stride, stride, 1])
        else:
            conv0 = raw_conv0

        z = conv0

        if True:  # batch normalization is always applied, regardless of the batch_norm flag
            assert phase_test is not None, "phase_test required for batch norm"
            if bn_name in parameters:
                bn_data = parameters[bn_name]
                bn_mean = bn_data['0'] / bn_data['2']
                bn_var = bn_data['1'] / bn_data['2']
            else:
                # No pretrained statistics available: fall back to zero mean
                # and an arbitrary variance of 0.5.
                bn_mean = np.zeros(f, dtype=np.float32)
                bn_var = np.full(f, 0.5, dtype=np.float32)

            if scale_name in parameters:
                mu = parameters[scale_name]['1']
                sg = parameters[scale_name]['0']
            else:
                mu = np.zeros(f, dtype=np.float32)
                sg = np.ones(f, dtype=np.float32)

            mm, vv = tf.nn.moments(z, [0, 1, 2], name='mommy')

            assert mu.size == f
            assert sg.size == f
            beta = tf.Variable(tf.constant(mu, shape=[f]),
                               name='beta',
                               trainable=True)
            gamma = tf.Variable(tf.constant(sg, shape=[f]),
                                name='gamma',
                                trainable=True)
            ema = ExponentialMovingAverageExtended(decay=0.999,
                                                   value=[bn_mean, bn_var],
                                                   num_updates=iteration)

            def mean_var_train():
                ema_apply_op = ema.apply([mm, vv])
                with tf.control_dependencies([ema_apply_op]):
                    return tf.identity(mm), tf.identity(vv)

            def mean_var_test():
                return ema.average(mm), ema.average(vv)

            mean, var = tf.cond(~phase_test, mean_var_train, mean_var_test)

            info['activations']['last_mean'] = mean
            info['activations']['last_var'] = var

            z = tf.nn.batch_normalization(z, mean, var, beta, gamma, 1e-5)

    info['activations']['preact_' + name] = z

    if activation is not None:
        z = activation(z)

    if info.get('scale_summary'):
        with tf.name_scope('activation'):
            tf.summary.scalar('activation/' + name,
                              tf.sqrt(tf.reduce_mean(z**2)))

    info['activations'][name] = z
    if 'weights' in info:
        info['weights'][name + ':weights'] = W
        #info['weights'][name + ':biases'] = b
    return z
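# Sketch: resnet_conv always applies batch normalization (see the `if True:`
# branch), so phase_test is required. With an empty `parameters` dict the BN
# statistics fall back to the hard-coded defaults above.
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 56, 56, 64])
phase_test = tf.placeholder(tf.bool, [])
info = {'activations': {}, 'weights': {}, 'init': {}}

z = resnet_conv(x, 64, size=3, name='res2a_branch2a',
                bn_name='bn2a_branch2a', scale_name='scale2a_branch2a',
                phase_test=phase_test, parameters={}, info=info)

# info['activations'] now holds both the pre-activation tensor
# ('preact_res2a_branch2a') and the post-ReLU output ('res2a_branch2a').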
Exemple #16
0
def decoder(y, from_name='fc7', to_name='conv0', info=DummyDict(),
            use_batch_norm=False, phase_test=None, global_step=None):
    BATCH_SIZE = y.get_shape().as_list()[0]

    if use_batch_norm:
        assert global_step is not None
        def bn(z, name):
            return batch_norm(z, global_step=global_step, phase_test=phase_test, name=name)
    else:
        def bn(z, name):
            return z

    if len(y.get_shape().as_list()) == 2:
        y = tf.expand_dims(tf.expand_dims(y, 1), 1)

    def check(name):
        return name in LAYERS[LAYERS.index(to_name):LAYERS.index(from_name)+1]

    if check('fc7'):
        sh = [BATCH_SIZE, 1, 1, 4096]
        y = ops.upconv(y, sh[-1], size=1, strides=1, info=info, activation=None, name='upfc7', output_shape=sh)
        y = bn(y, 'upfc7')
        info['activations']['pre:upfc7'] = y
        y = tf.nn.relu(y)
        info['activations']['upfc7'] = y

    if check('fc6'):
        sh = [BATCH_SIZE, 1, 1, 4096]
        y = ops.upconv(y, sh[-1], size=1, strides=1, info=info, activation=None, name='upfc6', output_shape=sh)
        y = bn(y, 'upfc6')
        info['activations']['pre:upfc6'] = y
        y = tf.nn.relu(y)
        info['activations']['upfc6'] = y

    if check('conv5'):
        sh = [BATCH_SIZE, 6, 6, 256]
        y = ops.upconv(y, sh[-1], size=6, strides=2, info=info, activation=None, name='upconv5_pre', output_shape=sh, padding='VALID')

        sh = [BATCH_SIZE, 13, 13, 256]
        y = ops.upconv(y, sh[-1], size=3, strides=2, info=info, activation=None, name='upconv5', output_shape=sh, padding='VALID')
        y = bn(y, 'upconv5')
        info['activations']['pre:upconv5'] = y
        y = tf.nn.relu(y)
        info['activations']['upconv5'] = y

    if check('conv4'):
        sh = [BATCH_SIZE, 13, 13, 384]
        y = ops.conv(y, sh[-1], size=3, strides=1, info=info, activation=None, name='upconv4', output_shape=sh, padding='SAME')
        y = bn(y, 'upconv4')
        info['activations']['pre:upconv4'] = y
        y = tf.nn.relu(y)
        info['activations']['upconv4'] = y

    if check('conv3'):
        sh = [BATCH_SIZE, 13, 13, 384]
        y = ops.conv(y, sh[-1], size=3, strides=1, info=info, activation=None, name='upconv3', output_shape=sh, padding='SAME')
        y = bn(y, 'upconv3')
        info['activations']['pre:upconv3'] = y
        y = tf.nn.relu(y)
        info['activations']['upconv3'] = y

    if check('conv2'):
        sh = [BATCH_SIZE, 27, 27, 256]
        y = ops.upconv(y, sh[-1], size=3, strides=2, info=info, activation=None, name='upconv2', output_shape=sh, padding='VALID')
        y = bn(y, 'upconv2')
        info['activations']['pre:upconv2'] = y
        y = tf.nn.relu(y)
        info['activations']['upconv2'] = y

    if check('conv1'):
        sh = [BATCH_SIZE, 57, 57, 96]
        y = ops.upconv(y, sh[-1], size=5, strides=2, info=info, activation=None, name='upconv1', output_shape=sh, padding='VALID')
        y = y[:, 1:-1, 1:-1]
        y = bn(y, 'upconv1')
        info['activations']['pre:upconv1'] = y
        y = tf.nn.relu(y)
        info['activations']['upconv1'] = y

    if check('conv0'):
        sh = [BATCH_SIZE, 227, 227, 3]
        y = ops.upconv(y, sh[-1], size=11, strides=4, info=info, activation=None, name='upconv0', output_shape=sh, padding='VALID')

    return y
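# Sketch of running the decoder above from fc7 activations back to image
# space. It relies on the module-level LAYERS list and ops.upconv, and the
# batch size must be static because the output shapes are built from it.
import tensorflow as tf

features = tf.placeholder(tf.float32, [8, 4096])   # e.g. fc7 activations
info = {'activations': {}}

recon = decoder(features, from_name='fc7', to_name='conv0', info=info)
# With the output_shape values above, `recon` comes out as [8, 227, 227, 3].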
Exemple #17
0
def build_network_atrous4(x,
                          info=DummyDict(),
                          parameters={},
                          phase_test=None,
                          convolutional=False,
                          final_layer=True,
                          pre_adjust_batch_norm=False):

    # Set up ResNet-152 with atrous (dilated) convolutions
    conv = functools.partial(resnet_conv,
                             size=3,
                             parameters=parameters,
                             info=info,
                             pre_adjust_batch_norm=pre_adjust_batch_norm)
    aconv = functools.partial(resnet_atrous_conv,
                              size=3,
                              parameters=parameters,
                              info=info,
                              pre_adjust_batch_norm=pre_adjust_batch_norm)
    pool = functools.partial(ops.max_pool, info=info)
    avg_pool = functools.partial(ops.avg_pool, info=info)
    dropout = functools.partial(ops.dropout, phase_test=phase_test, info=info)

    z = x
    conv1 = conv(z,
                 64,
                 size=7,
                 stride=2,
                 name='conv1',
                 bn_name='bn_conv1',
                 scale_name='scale_conv1')

    pool1 = pool(conv1, 3, stride=2, name='pool1')

    res2a_branch1 = conv(pool1,
                         256,
                         size=1,
                         name='res2a_branch1',
                         bn_name='bn2a_branch1',
                         scale_name='scale2a_branch1',
                         activation=None)

    res2a_branch2a = conv(pool1,
                          64,
                          size=1,
                          name='res2a_branch2a',
                          bn_name='bn2a_branch2a',
                          scale_name='scale2a_branch2a')

    res2a_branch2b = conv(res2a_branch2a,
                          64,
                          size=3,
                          name='res2a_branch2b',
                          bn_name='bn2a_branch2b',
                          scale_name='scale2a_branch2b')
    res2a_branch2c = conv(res2a_branch2b,
                          256,
                          size=1,
                          name='res2a_branch2c',
                          bn_name='bn2a_branch2c',
                          scale_name='scale2a_branch2c',
                          activation=None)

    res2a = tf.nn.relu(tf.add(res2a_branch1, res2a_branch2c), name='res2a')
    info['activations']['res2a'] = res2a

    # ---
    """
:call nobias-conv 1 0 1 64 res2a res2b_branch2a
:call batch-norm res2b_branch2a bn2b_branch2a
:call bias res2b_branch2a scale2b_branch2a
:call relu res2b_branch2a
:#
:call nobias-conv 3 1 1 64 res2b_branch2a res2b_branch2b
:call batch-norm res2b_branch2b bn2b_branch2b
:call bias res2b_branch2b scale2b_branch2b
:call relu res2b_branch2b
:#
:call nobias-conv 1 0 1 256 res2b_branch2b res2b_branch2c
:call batch-norm res2b_branch2c bn2b_branch2c
:call bias res2b_branch2c scale2b_branch2c
:call add res2a res2b_branch2c res2b
:call relu res2b
    """
    def block(x, ch1, ch2, b, hole=1):
        output = 'res{}'.format(b)
        branch2a = conv(x,
                        ch1,
                        size=1,
                        name='res{}_branch2a'.format(b),
                        bn_name='bn{}_branch2a'.format(b),
                        scale_name='scale{}_branch2a'.format(b))
        branch2b = aconv(branch2a,
                         ch1,
                         size=3,
                         hole=hole,
                         name='res{}_branch2b'.format(b),
                         bn_name='bn{}_branch2b'.format(b),
                         scale_name='scale{}_branch2b'.format(b))
        branch2c = conv(branch2b,
                        ch2,
                        size=1,
                        name='res{}_branch2c'.format(b),
                        bn_name='bn{}_branch2c'.format(b),
                        scale_name='scale{}_branch2c'.format(b),
                        activation=None)
        z = tf.nn.relu(tf.add(x, branch2c), name=output)
        info['activations'][output] = z
        return z

    """
:call nobias-conv 1 0 2 ${ch2} res${a} res${b}_branch1
:call batch-norm res${b}_branch1 bn${b}_branch1
:call bias res${b}_branch1 scale${b}_branch1
:#
:call nobias-conv 1 0 2 ${ch1} res${a} res${b}_branch2a
:call batch-norm res${b}_branch2a bn${b}_branch2a
:call bias res${b}_branch2a scale${b}_branch2a
:call relu res${b}_branch2a
:#
:call nobias-conv 3 1 1 ${ch1} res${b}_branch2a res${b}_branch2b
:call batch-norm res${b}_branch2b bn${b}_branch2b
:call bias res${b}_branch2b scale${b}_branch2b
:call relu res${b}_branch2b
:#
:call nobias-conv 1 0 1 ${ch2} res${b}_branch2b res${b}_branch2c
:call batch-norm res${b}_branch2c bn${b}_branch2c
:call bias res${b}_branch2c scale${b}_branch2c
:call add res${b}_branch1 res${b}_branch2c res${b}
:call relu res${b}
    """

    def block_reduce(x, ch1, ch2, b, stride=2, hole=1):
        output = 'res{}'.format(b)
        branch1 = conv(x,
                       ch2,
                       size=1,
                       stride=stride,
                       name='res{}_branch1'.format(b),
                       bn_name='bn{}_branch1'.format(b),
                       scale_name='scale{}_branch1'.format(b),
                       activation=None)

        branch2a = conv(x,
                        ch1,
                        size=1,
                        stride=stride,
                        name='res{}_branch2a'.format(b),
                        bn_name='bn{}_branch2a'.format(b),
                        scale_name='scale{}_branch2a'.format(b))
        branch2b = aconv(branch2a,
                         ch1,
                         size=3,
                         hole=hole,
                         name='res{}_branch2b'.format(b),
                         bn_name='bn{}_branch2b'.format(b),
                         scale_name='scale{}_branch2b'.format(b))
        branch2c = conv(branch2b,
                        ch2,
                        size=1,
                        name='res{}_branch2c'.format(b),
                        bn_name='bn{}_branch2c'.format(b),
                        scale_name='scale{}_branch2c'.format(b),
                        activation=None)
        z = tf.nn.relu(tf.add(branch1, branch2c), name=output)
        info['activations'][output] = z
        return z
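    # The ":call ..." strings above are the Caffe macro scripts these two
    # helpers mirror: `block` is the identity-shortcut bottleneck
    # (1x1 -> 3x3 -> 1x1 plus a residual add), while `block_reduce` adds a
    # 1x1 projection shortcut (branch1) and optionally changes the stride or
    # dilation at the start of a stage.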

    res2b = block(res2a, 64, 256, '2b')
    res2c = block(res2b, 64, 256, '2c')

    res3a = block_reduce(res2c, 128, 512, '3a')
    """
:call resnet 128 512 3a  3b1
:call resnet 128 512 3b1 3b2
:call resnet 128 512 3b2 3b3
:call resnet 128 512 3b3 3b4
:call resnet 128 512 3b4 3b5
:call resnet 128 512 3b5 3b6
:call resnet 128 512 3b6 3b7
    """
    z3 = res3a
    for i in range(1, 8):
        z3 = block(z3, 128, 512, '3b{}'.format(i))
    res3b7 = z3
    """
:call resnet-reduce 256 1024 3b7 4a
    """
    res4a = block_reduce(res3b7, 256, 1024, '4a', stride=1, hole=2)
    """
:call resnet 256 1024 4a 4b1
:call resnet 256 1024 4b1 4b2
:call resnet 256 1024 4b2 4b3
:call resnet 256 1024 4b3 4b4
:call resnet 256 1024 4b4 4b5
:call resnet 256 1024 4b5 4b6
:call resnet 256 1024 4b6 4b7
:call resnet 256 1024 4b7 4b8
:call resnet 256 1024 4b8 4b9
:call resnet 256 1024 4b9 4b10
:call resnet 256 1024 4b10 4b11
:call resnet 256 1024 4b11 4b12
:call resnet 256 1024 4b12 4b13
:call resnet 256 1024 4b13 4b14
:call resnet 256 1024 4b14 4b15
:call resnet 256 1024 4b15 4b16
:call resnet 256 1024 4b16 4b17
:call resnet 256 1024 4b17 4b18
:call resnet 256 1024 4b18 4b19
:call resnet 256 1024 4b19 4b20
:call resnet 256 1024 4b20 4b21
:call resnet 256 1024 4b21 4b22
:call resnet 256 1024 4b22 4b23
:call resnet 256 1024 4b23 4b24
:call resnet 256 1024 4b24 4b25
:call resnet 256 1024 4b25 4b26
:call resnet 256 1024 4b26 4b27
:call resnet 256 1024 4b27 4b28
:call resnet 256 1024 4b28 4b29
:call resnet 256 1024 4b29 4b30
:call resnet 256 1024 4b30 4b31
:call resnet 256 1024 4b31 4b32
:call resnet 256 1024 4b32 4b33
:call resnet 256 1024 4b33 4b34
:call resnet 256 1024 4b34 4b35
    """
    z4 = res4a
    for i in range(1, 36):
        z4 = block(z4, 256, 1024, '4b{}'.format(i), hole=2)
    res4b35 = z4
    """
:call resnet-reduce 512 2048 4b35 5a
    """
    res5a = block_reduce(res4b35, 512, 2048, '5a', stride=1, hole=4)
    """
:call resnet 512 2048 5a 5b
:call resnet 512 2048 5b 5c
    """
    res5b = block(res5a, 512, 2048, '5b', hole=4)
    res5c = block(res5b, 512, 2048, '5c', hole=4)
    """
layer {
    bottom: "res5c"
    top: "pool5"
    name: "pool5"
    type: "Pooling"
    pooling_param {
        kernel_size: 7
        stride: 1
        pool: AVE
    }
}
    """

    #res5c =
    #res5c = tf.strided_slice(res5c, [0, 0, 0, 0], res5c.get_shape(), [1, 4, 4, 1])

    if final_layer:
        pool5 = ops.atrous_avg_pool(
            res5c,
            7,
            rate=4,
            name='pool5',
            padding='SAME' if convolutional else 'VALID')
        info['activations']['pool5'] = pool5
        ##pool5 = avg_pool(res5c, 7 * 4, stride=1, name='pool5', padding='SAME' if convolutional else 'VALID')
        #pool5 = res5c
        if convolutional:
            z = conv(pool5, 1000, size=1, name='fc1000', activation=None)
        else:
            z = resnet_inner(pool5,
                             1000,
                             info=info,
                             parameters=parameters,
                             activation=None,
                             name='fc1000')
    else:
        z = res5c

    return z
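# Sketch: building the atrous ResNet-152 feature extractor defined above.
# `parameters` would normally hold the pretrained Caffe weights; with an empty
# dict the initializer helpers fall back to random/default initialization.
# Strides are removed in conv4/conv5 and replaced by dilation (hole=2, hole=4),
# so res5c keeps the same spatial resolution as the res3 stage.
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 224, 224, 3])
phase_test = tf.placeholder(tf.bool, [])
info = {'activations': {}, 'weights': {}, 'init': {}}

features = build_network_atrous4(x, info=info, parameters={},
                                 phase_test=phase_test, convolutional=True,
                                 final_layer=False)   # returns res5c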
Exemple #18
0
def resnet_atrous_conv(x,
                       channels,
                       size=3,
                       padding='SAME',
                       stride=1,
                       hole=1,
                       batch_norm=False,
                       phase_test=None,
                       activation=tf.nn.relu,
                       name=None,
                       parameter_name=None,
                       bn_name=None,
                       scale_name=None,
                       summarize_scale=False,
                       info=DummyDict(),
                       parameters={},
                       pre_adjust_batch_norm=False):
    if parameter_name is None:
        parameter_name = name
    if scale_name is None:
        scale_name = parameter_name
    with tf.name_scope(name):
        features = int(x.get_shape()[3])
        f = channels
        shape = [size, size, features, f]

        W_init, W_shape = _pretrained_resnet_conv_weights_initializer(
            parameter_name,
            parameters,
            info=info.get('init'),
            pre_adjust_batch_norm=pre_adjust_batch_norm,
            bn_name=bn_name,
            scale_name=scale_name)
        b_init, b_shape = _pretrained_resnet_biases_initializer(
            scale_name,
            parameters,
            info=info.get('init'),
            pre_adjust_batch_norm=pre_adjust_batch_norm,
            bn_name=bn_name)

        assert W_shape is None or tuple(W_shape) == tuple(
            shape
        ), "Incorrect weights shape for {} (file: {}, spec: {})".format(
            name, W_shape, shape)
        assert b_shape is None or tuple(b_shape) == (
            f, ), "Incorrect bias shape for {} (file: {}, spec: {})".format(
                name, b_shape, (f, ))

        with tf.variable_scope(name):
            W = tf.get_variable('weights',
                                shape,
                                dtype=tf.float32,
                                initializer=W_init)
            b = tf.get_variable('biases', [f],
                                dtype=tf.float32,
                                initializer=b_init)

        if hole == 1:
            raw_conv0 = tf.nn.conv2d(x,
                                     W,
                                     strides=[1, 1, 1, 1],
                                     padding=padding)
        else:
            assert stride == 1
            raw_conv0 = tf.nn.atrous_conv2d(x, W, rate=hole, padding=padding)
        #conv0 = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
        if stride > 1:
            conv0 = tf.strided_slice(raw_conv0, [0, 0, 0, 0],
                                     raw_conv0.get_shape(),
                                     [1, stride, stride, 1])
        else:
            conv0 = raw_conv0
        h1 = tf.reshape(tf.nn.bias_add(conv0, b), conv0.get_shape())

        z = h1

    if activation is not None:
        z = activation(z)

    if info.get('scale_summary'):
        with tf.name_scope('activation'):
            tf.summary.scalar('activation/' + name,
                              tf.sqrt(tf.reduce_mean(z**2)))

    info['activations'][name] = z
    return z
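# Sketch: resnet_atrous_conv is the dilated counterpart of resnet_conv. It does
# not apply batch normalization at run time; instead, when pre_adjust_batch_norm
# is set and pretrained statistics are available, the initializer helpers
# adjust the loaded weights and biases accordingly.
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 28, 28, 1024])
info = {'activations': {}, 'init': {}}

z = resnet_atrous_conv(x, 256, size=3, hole=2, name='res4b1_branch2b',
                       bn_name='bn4b1_branch2b',
                       scale_name='scale4b1_branch2b',
                       parameters={}, info=info,
                       pre_adjust_batch_norm=True)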