Example #1
    def build(self):
        with tf.variable_scope(self.name):
            stacker = Stacker(self.x)
            stacker.conv2d(1, CONV_FILTER_3311)
            self.recon = stacker.sigmoid()

        return self
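All of the examples on this page lean on the same builder pattern: a Stacker instance wraps an input tensor, each method stacks one op on top of the current output, and last_layer always points at the newest tensor. The actual demetoir/MLtools implementation is not shown here; the fragment below is only a minimal sketch of that pattern (the class name, the tf.layers calls, and the reading of CONV_FILTER_3311 as a 3x3 kernel with 1x1 stride are assumptions) to make the call chains in the examples easier to follow.

import tensorflow as tf


class MiniStacker:
    """Minimal sketch of a Stacker-like builder; not the MLtools implementation."""

    def __init__(self, start_layer, name='stacker'):
        self.name = name
        self.last_layer = start_layer  # running output tensor

    def conv2d(self, n_channel, filter_):
        # filter_ is assumed to be (kernel_h, kernel_w, stride_h, stride_w)
        k_h, k_w, s_h, s_w = filter_
        self.last_layer = tf.layers.conv2d(
            self.last_layer, n_channel, (k_h, k_w), (s_h, s_w), padding='SAME')
        return self.last_layer

    def sigmoid(self):
        self.last_layer = tf.nn.sigmoid(self.last_layer)
        return self.last_layer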
Example #2
    def build(self):
        with tf.variable_scope(self.name, reuse=self.reuse):
            self.stacker = Stacker(self.x, verbose=self.verbose)
            self.stacker.resize_image((224, 224))
            self.stacker = self.head(self.stacker)
            self.stacker = self.body(self.stacker)
            self.foot(self.stacker, self.n_channel, self.n_classes)
Example #3
    def build(self):
        with tf.variable_scope(self.name, reuse=self.reuse):
            self.stacker = Stacker(self.x)

            self.stacker.conv_block(self.capacity * 8, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.capacity * 8, CONV_FILTER_3311, relu)
            self.stacker.upscale_2x_block(self.capacity * 8, CONV_FILTER_3322,
                                          relu)

            self.stacker.conv_block(self.capacity * 4, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.capacity * 4, CONV_FILTER_3311, relu)
            self.stacker.upscale_2x_block(self.capacity * 4, CONV_FILTER_3322,
                                          relu)

            self.stacker.conv_block(self.capacity * 2, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.capacity * 2, CONV_FILTER_3311, relu)
            self.stacker.upscale_2x_block(self.capacity * 2, CONV_FILTER_3322,
                                          relu)

            self.stacker.conv_block(self.capacity, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.capacity, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.capacity, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.capacity, CONV_FILTER_3311, relu)
            self.stacker.conv2d(self.output_channel, CONV_FILTER_3311)
            self.stacker.relu()

            self.decode = self.stacker.last_layer

        return self
Example #4
    def build(self):
        with tf.variable_scope(self.name):
            self.stacker = Stacker(self.x)

            self.stacker.resize_image(self.resize_shape)
            stacker = self.stem(self.stacker)

            for i in range(4):
                stacker.add_layer(self.inception_A)

            stacker.add_layer(self.reduction_A)
            for i in range(7):
                stacker.add_layer(self.inception_B)
            self.aux_logit, self.aux_proba = self.aux(stacker.last_layer,
                                                      self.n_classes)

            stacker.add_layer(self.reduction_B)

            for i in range(3):
                stacker.add_layer(self.inception_C)

            stacker.max_pooling((8, 8, 8, 8))

            # dropout
            self.flatten_layer = stacker.flatten()
            stacker.linear_block(self.n_channel * 64, relu)
            self.logit = stacker.linear(self.n_classes)
            self.proba = stacker.softmax()
Example #5
    def generator(zs, Ys, cs, net_shapes, flatten_size, output_shape, reuse=False, name='generator'):
        with tf.variable_scope(name, reuse=reuse):
            layer = Stacker(concat((zs, Ys, cs), axis=1))

            for shape in net_shapes:
                layer.linear_block(shape, lrelu)

            layer.linear(flatten_size)
            layer.sigmoid()
            layer.reshape(output_shape)

        return layer.last_layer
Example #6
    def build(self):
        def _Unet_recursion(stacker, n_channel, level, dropout_tensor=None):
            if level == 0:
                for i in range(self.depth):
                    stacker.conv_block(n_channel, CONV_FILTER_3311, relu)
                if dropout_tensor is not None:
                    stacker.dropout(dropout_tensor)

            else:
                # encode
                for i in range(self.depth):
                    stacker.conv_block(n_channel, CONV_FILTER_3311, relu)
                if dropout_tensor is not None:
                    stacker.dropout(dropout_tensor)

                concat = stacker.last_layer
                stacker.max_pooling(CONV_FILTER_2222)

                stacker = _Unet_recursion(stacker, n_channel * 2, level - 1,
                                          dropout_tensor=dropout_tensor)

                # decode
                stacker.upscale_2x_block(n_channel, CONV_FILTER_2211, relu)

                stacker.concat(concat, axis=3)
                for i in range(self.depth):
                    stacker.conv_block(n_channel, CONV_FILTER_3311, relu)
                    if dropout_tensor is not None:
                        stacker.dropout(dropout_tensor)

            return stacker

        with tf.variable_scope(self.name, reuse=self.reuse):
            self.DynamicDropoutRate = DynamicDropoutRate(self.dropout_rate)
            self.dropout_tensor = self.DynamicDropoutRate.tensor

            self.stacker = Stacker(self.x, verbose=self.verbose)

            self.stacker = _Unet_recursion(self.stacker,
                                           n_channel=self.n_channel,
                                           level=self.level,
                                           dropout_tensor=self.dropout_tensor)

            self.stacker.conv2d(self.n_classes, CONV_FILTER_3311)
            self.logit = self.stacker.last_layer

            self.stacker.pixel_wise_softmax()
            self.proba = self.stacker.last_layer

        return self
Example #7
    def build(self):
        with tf.variable_scope(self.name):
            self.skip_tensors = []
            self.stacker = Stacker(self.x)

            stacker = self.stem(self.stacker)

            stacker.add_layer(self.module_A, self.a_channels0)
            stacker.add_layer(self.module_A, self.a_channels1)
            stacker.add_layer(self.module_A, self.a_channels2)
            self.skip_tensors += [stacker.last_layer]

            stacker.add_layer(self.multi_pool_A, self.a_b_multi_pool_channels)

            stacker.add_layer(self.module_B, self.b_channels0)
            stacker.add_layer(self.module_B, self.b_channels1)
            stacker.add_layer(self.module_B, self.b_channels2)
            stacker.add_layer(self.module_B, self.b_channels3)
            # self.aux_logit, self.aux_proba = self.aux(stacker.last_layer, self.n_classes)
            self.skip_tensors += [stacker.last_layer]

            stacker.add_layer(self.multi_pool_B, self.b_c_multi_pool_channels)

            stacker.add_layer(self.module_C, self.c_channels0)
            stacker.add_layer(self.module_C, self.c_channels1)
            self.last_layer = stacker.last_layer
Example #8
    def bottom_layer(self, x, n_channel, depth, dropout_rate_tensor):
        stacker = Stacker(x, name='bottom')
        stacker = self.conv_seq(stacker, n_channel, depth, dropout_rate_tensor)
        stacker = self.conv_seq(stacker, n_channel, depth, dropout_rate_tensor)
        self.bottom_layer_stacker = stacker

        return self.bottom_layer_stacker.last_layer
Example #9
    def discriminator(Xs, net_shapes, reuse=False, name='discriminator'):
        with tf.variable_scope(name, reuse=reuse):
            layer = Stacker(flatten(Xs))
            for shape in net_shapes:
                layer.linear_block(shape, relu)

            layer.linear(1)
            layer.sigmoid()

        return layer.last_layer
Example #10
def common_AE_decoder_tail(stack: Stacker,
                           flatten_size,
                           output_shape,
                           bn=False,
                           activation='sigmoid') -> Stacker:
    stack.linear(flatten_size)
    if bn:
        stack.bn()
    stack.activation(activation)
    stack.reshape(output_shape)
    return stack
Example #11
def _residual_atrous_block(x, n_channel, filter_, activation):
    name = 'residual_block'
    with tf.variable_scope(name):
        x_in = x
        stack = Stacker(x)
        stack.atrous_conv2d_block(n_channel, filter_, 2, activation)
        stack.atrous_conv2d_block(n_channel, filter_, 2, activation)
        stack.atrous_conv2d_block(n_channel, filter_, 2, activation)
        stack.residual_add(x_in)

    return stack.last_layer
Example #12
    def encoder(self, Xs, Ys, net_shapes, reuse=False, name='encoder'):
        with tf.variable_scope(name, reuse=reuse):
            stack = Stacker(concat((flatten(Xs), Ys), axis=1))

            for shape in net_shapes:
                stack.linear(shape)
                stack.lrelu()

            stack.linear(self.z_size * 2)

        return stack.last_layer
Example #13
    def build(self):
        with tf.variable_scope(self.name):
            self.stacker = Stacker(self.x, verbose=self.verbose)
            #  resize to 224 * 224
            self.stacker.resize_image((224, 224))

            # 224
            self.stacker.conv_block(self.n_channel, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.n_channel, CONV_FILTER_3311, relu)
            self.stacker.max_pooling(CONV_FILTER_2222)

            # 112
            self.stacker.conv_block(self.n_channel * 2, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.n_channel * 2, CONV_FILTER_3311, relu)
            self.stacker.max_pooling(CONV_FILTER_2222)

            # 56
            self.stacker.conv_block(self.n_channel * 4, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.n_channel * 4, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.n_channel * 4, CONV_FILTER_3311, relu)
            self.stacker.max_pooling(CONV_FILTER_2222)

            # 28
            self.stacker.conv_block(self.n_channel * 8, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.n_channel * 8, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.n_channel * 8, CONV_FILTER_3311, relu)
            self.stacker.max_pooling(CONV_FILTER_2222)

            # 14
            self.stacker.conv_block(self.n_channel * 8, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.n_channel * 8, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.n_channel * 8, CONV_FILTER_3311, relu)
            self.stacker.max_pooling(CONV_FILTER_2222)

            # 7 x 7 x 512, to fc
            self.stacker.flatten()
            self.stacker.linear_block(self.n_channel * 64, relu)
            self.stacker.linear_block(self.n_channel * 64, relu)
            self.stacker.linear(self.n_classes)
            self.logit = self.stacker.last_layer
            self.stacker.softmax()
            self.proba = self.stacker.last_layer
Example #14
def test_TFL1Normalize():
    import numpy as np
    x = np.random.normal(size=[100, 10])
    y = np.random.normal(size=[100, 1])
    x_ph = placeholder(tf.float32, [-1, 10], name='ph_x')

    with tf.variable_scope('net'):
        stack = Stacker(x_ph)
        stack.linear_block(100, relu)
        stack.linear_block(100, relu)
        logit = stack.linear(1)
        proba = stack.softmax()

        loss = (proba - y)**2

    var_list = collect_vars('net')
    l1_norm = TFL1Normalize(var_list)
    l1_norm.build()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        val = sess.run(l1_norm.penalty, feed_dict={x_ph: x})
        rate = sess.run(l1_norm.rate_var)
        print(f'l1_norm = {val}, rate = {rate}')

        l1_norm.update_rate(sess, 0.5)
        val = sess.run(l1_norm.penalty, feed_dict={x_ph: x})
        rate = sess.run(l1_norm.rate_var)
        print(f'l1_norm = {val}, rate = {rate}')

        l1_norm.update_rate(sess, 0.1)
        val = sess.run(l1_norm.penalty, feed_dict={x_ph: x})
        rate = sess.run(l1_norm.rate_var)
        print(f'l1_norm = {val}, rate = {rate}')
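The test above only evaluates the penalty; in a training graph it would normally be folded into the objective before the train op is built. A minimal sketch of that step, reusing the loss and l1_norm names from the example (the optimizer settings are illustrative, and adding the penalty directly assumes its rate is already applied inside l1_norm.penalty, as the rate_var prints above suggest):

# illustrative only: regularized objective = task loss + L1 penalty
total_loss = tf.reduce_mean(loss) + l1_norm.penalty
train_op = tf.train.AdamOptimizer(learning_rate=0.01).minimize(total_loss)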
Example #15
    def build(self):
        with tf.variable_scope(self.name, reuse=self.reuse):
            self.stacker = Stacker(self.x)

            self.stacker.conv_block(self.capacity, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.capacity, CONV_FILTER_3311, relu)
            self.stacker.max_pooling(CONV_FILTER_2222)

            self.stacker.conv_block(self.capacity * 2, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.capacity * 2, CONV_FILTER_3311, relu)
            self.stacker.max_pooling(CONV_FILTER_2222)

            self.stacker.conv_block(self.capacity * 4, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.capacity * 4, CONV_FILTER_3311, relu)
            self.stacker.max_pooling(CONV_FILTER_2222)

            self.stacker.conv_block(self.capacity * 8, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.capacity * 8, CONV_FILTER_3311, relu)
            self.stacker.conv_block(self.capacity * 8, CONV_FILTER_3311, relu)

            self.encode = self.stacker.last_layer

        return self
Example #16
File: VAE.py  Project: demetoir/MLtools
def encoder_tail(stack: Stacker,
                 latent_code_size,
                 bn=False,
                 activation='none') -> Stacker:
    stack.linear(latent_code_size * 2)
    if bn:
        stack.bn()
    stack.activation(activation)
    return stack
Example #17
def common_AE_encoder_tail(stack: Stacker,
                           latent_code_size,
                           bn=False,
                           activation='relu') -> Stacker:
    stack.linear(latent_code_size)
    if bn:
        stack.bn()
    stack.activation(activation)
    return stack
Example #18
def common_linear_stack(stack: Stacker,
                        net_shapes,
                        bn=True,
                        activation='relu') -> Stacker:
    for shape in net_shapes:
        stack.linear(shape)
        if bn:
            stack.bn()
        stack.activation(activation)
    return stack
Example #19
    def classifier(self, Xs, net_shapes, name='classifier'):
        with tf.variable_scope(name):
            layer = Stacker(flatten(Xs))

            for net_shape in net_shapes:
                layer.linear_block(net_shape, relu)

            layer.linear(self.Y_flatten_size)
            logit = layer.last_layer
            h = softmax(logit)
        return logit, h
Example #20
    def encoder_tail(stack: Stacker,
                     Y_flatten_size,
                     z_size,
                     bn=False,
                     activation='none'):
        stack.linear(z_size + Y_flatten_size)
        if bn:
            stack.bn()
        stack.activation(activation)

        zs = stack.last_layer[:, :z_size]
        Ys_gen = stack.last_layer[:, z_size:]
        hs = softmax(Ys_gen)
        return zs, Ys_gen, hs
Example #21
    def Q_function(Xs_gen, net_shapes, discrete_code_size, continuous_code_size, reuse=False, name='Q_function'):
        with tf.variable_scope(name, reuse=reuse):
            layer = Stacker(Xs_gen)
            for shape in net_shapes:
                layer.linear_block(shape, relu)

            code_logit = layer.linear(discrete_code_size + continuous_code_size)
            code = layer.softmax()

            discrete_code = code[:, :discrete_code_size]
            continuous_code = code[:, discrete_code_size:]
            discrete_code_logit = code_logit[:, :discrete_code_size]
            continuous_code_logit = code_logit[:, discrete_code_size:]

        return discrete_code, discrete_code_logit, continuous_code, continuous_code_logit
Example #22
def test_TFDynamicLearningRate():
    import numpy as np
    x = np.random.normal(size=[100, 10])
    y = np.random.normal(size=[100, 1])
    x_ph = placeholder(tf.float32, [-1, 10], name='ph_x')

    stack = Stacker(x_ph)
    stack.linear_block(100, relu)
    stack.linear_block(100, relu)
    logit = stack.linear(1)
    proba = stack.softmax()

    loss = (proba - y) ** 2
    dlr = TFDynamicLearningRate(0.01)
    dlr.build()

    lr_var = dlr.learning_rate
    var_list = None
    train_op = tf.train.AdamOptimizer(learning_rate=lr_var, beta1=0.9).minimize(loss, var_list=var_list)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(f'lr = {dlr.lr_tensor(sess)}')

        dlr.update(sess, 0.1)
        print(f'lr = {dlr.lr_tensor(sess)}')
        sess.run(train_op, feed_dict={x_ph: x})
        print(f'loss = {np.mean(sess.run(loss, feed_dict={x_ph: x}))}')

        dlr.update(sess, 0.05)
        print(f'lr = {dlr.lr_tensor(sess)}')
        sess.run(train_op, feed_dict={x_ph: x})
        print(f'loss = {np.mean(sess.run(loss, feed_dict={x_ph: x}))}')

        dlr.update(sess, 0.02)
        print(f'lr = {dlr.lr_tensor(sess)}')
        sess.run(train_op, feed_dict={x_ph: x})
        print(f'loss = {np.mean(sess.run(loss, feed_dict={x_ph: x}))}')
Example #23
    def multi_pool_B(x, channels, name='multi_pool_B'):
        with tf.variable_scope(name):
            a = Stacker(x, name='branch_A')
            a.conv_block(channels['a0'], CONV_FILTER_1111, relu)
            a.conv_block(channels['a1'], CONV_FILTER_3322, relu)

            b = Stacker(x, name='branch_B')
            b.conv_block(channels['b0'], CONV_FILTER_1111, relu)
            b.conv_block(channels['b1'], CONV_FILTER_7111, relu)
            b.conv_block(channels['b2'], CONV_FILTER_1711, relu)
            b.conv_block(channels['b3'], CONV_FILTER_3322, relu)

            c = Stacker(x, name='branch_C')
            c.max_pooling(CONV_FILTER_3322)

            return concat([a.last_layer, b.last_layer, c.last_layer], axis=3)
Example #24
    def module_B(x, channels, name='module_B'):
        # mixed4: 17 x 17 x 768.

        with tf.variable_scope(name):
            a = Stacker(x, name='branch_A')
            a.conv_block(channels['a0'], CONV_FILTER_1111, relu)

            b = Stacker(x, name='branch_B')
            b.conv_block(channels['b0'], CONV_FILTER_1111, relu)
            b.conv_block(channels['b1'], CONV_FILTER_7111, relu)
            b.conv_block(channels['b2'], CONV_FILTER_1711, relu)

            c = Stacker(x, name='branch_C')
            c.conv_block(channels['c0'], CONV_FILTER_1111, relu)
            c.conv_block(channels['c1'], CONV_FILTER_7111, relu)
            c.conv_block(channels['c2'], CONV_FILTER_1711, relu)
            c.conv_block(channels['c3'], CONV_FILTER_7111, relu)
            c.conv_block(channels['c4'], CONV_FILTER_1711, relu)

            d = Stacker(x, name='branch_D')
            d.max_pooling(CONV_FILTER_3311)
            d.conv_block(channels['d1'], CONV_FILTER_3311, relu)

            return concat(
                [a.last_layer, b.last_layer, c.last_layer, d.last_layer],
                axis=3)
Example #25
    def decoder_head(zs, Ys) -> Stacker:
        stack = Stacker(concat((zs, Ys), axis=1))
        return stack
Example #26
    def encoder_head(Xs):
        stack = Stacker(Xs)
        stack.flatten()
        return stack
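Examples #10, #17, #18, #25 and #26 are fragments of the same modular encoder/decoder style: a head builds the initial Stacker, common_linear_stack adds the hidden layers, and a tail maps to the latent code or the reconstruction. The composition below is a hedged sketch, not code from the repo, and the shapes and sizes are illustrative:

def toy_AE_encoder(Xs, net_shapes=(256, 128), latent_code_size=32):
    # head -> hidden linear stack -> tail; every helper returns the same Stacker
    stack = Stacker(Xs)                # as in encoder_head (Example #26)
    stack.flatten()
    stack = common_linear_stack(stack, net_shapes)            # Example #18
    stack = common_AE_encoder_tail(stack, latent_code_size)   # Example #17
    return stack.last_layer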
Example #27
    def build(self):

        self.DynamicDropoutRate = DynamicDropoutRate(self.dropout_rate)
        self.drop_out_tensor = self.DynamicDropoutRate.tensor

        with tf.variable_scope(self.name, reuse=self.reuse):
            self.encoder = InceptionV2UNetEncoderModule(
                self.x, None, resize_shape=(201, 201), capacity=self.capacity)
            self.encoder.build()
            encode = self.encoder.last_layer
            skip_tensors = self.encoder.skip_tensors[::-1]

            bottom_layer = self.bottom_layer(encode, self.capacity * 128,
                                             self.depth, self.drop_out_tensor)

            pprint(skip_tensors)
            stacker = Stacker(bottom_layer)

            stacker.layers_conv2d_transpose(self.n_channel * 16 * 2, (3, 3),
                                            (2, 2), 'SAME')
            stacker.bn()
            stacker.relu()
            stacker.concat(
                [stacker.last_layer[:, :13, :13, :], skip_tensors[0]], 3)
            stacker.layers_dropout(self.drop_out_tensor)
            stacker.layers_conv2d(self.n_channel * 16 * 2, (3, 3), (1, 1),
                                  'SAME')
            stacker.bn()
            stacker.relu()
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 16 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 16 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 16 * 2)
            stacker.bn()
            stacker.relu()

            # 12 to 25
            stacker.layers_conv2d_transpose(self.n_channel * 8 * 2, (3, 3),
                                            (2, 2), 'SAME')
            stacker.bn()
            stacker.relu()
            stacker.concat([stacker.last_layer, skip_tensors[1]], 3)
            stacker.layers_dropout(self.drop_out_tensor)
            stacker.layers_conv2d(self.n_channel * 8 * 2, (3, 3), (1, 1),
                                  'SAME')
            stacker.bn()
            stacker.relu()
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 8 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 8 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 8 * 2)
            stacker.bn()
            stacker.relu()

            # 25 to 50
            stacker.layers_conv2d_transpose(self.n_channel * 4 * 2, (3, 3),
                                            (2, 2), 'SAME')
            stacker.bn()
            stacker.relu()
            stacker.concat(
                [stacker.last_layer[:, :51, :51, :], skip_tensors[2]], 3)
            stacker.layers_dropout(self.drop_out_tensor)
            stacker.layers_conv2d(self.n_channel * 4 * 2, (3, 3), (1, 1),
                                  'SAME')
            stacker.bn()
            stacker.relu()
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 4 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 4 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 4 * 2)
            stacker.bn()
            stacker.relu()

            # 50 to 101
            stacker.layers_conv2d_transpose(self.n_channel * 2 * 2, (3, 3),
                                            (2, 2), 'SAME')
            stacker.bn()
            stacker.relu()
            stacker.concat(
                [stacker.last_layer[:, :101, :101, :], skip_tensors[3]], 3)
            stacker.layers_dropout(self.drop_out_tensor)
            stacker.layers_conv2d(self.n_channel * 2 * 2, (3, 3), (1, 1),
                                  'SAME')
            stacker.bn()
            stacker.relu()
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 2 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 2 * 2)
            stacker.add_layer(_residual_block_pre_activation,
                              self.n_channel * 2 * 2)
            stacker.bn()
            stacker.relu()

            decode = stacker.last_layer

            stacker = Stacker(decode, name='to_match')
            stacker.conv2d(self.n_classes, CONV_FILTER_3311)
            self.logit = stacker.last_layer
            self.proba = stacker.sigmoid()
Example #28
    def module_C(x, channels, name='module_C'):
        # type b?
        with tf.variable_scope(name):
            a = Stacker(x, name='branch_A')
            a.conv_block(channels['a0'], CONV_FILTER_1111, relu)

            b = Stacker(x, name='branch_B')
            b.conv_block(channels['b0'], CONV_FILTER_1111, relu)

            b0 = Stacker(b.last_layer, name='branch_B0')
            b0.conv_block(channels['b1-0'], CONV_FILTER_3111, relu)

            b1 = Stacker(b.last_layer, name='branch_B1')
            b1.conv_block(channels['b1-1'], CONV_FILTER_1311, relu)

            c = Stacker(x, name='branch_C')
            c.conv_block(channels['c0'], CONV_FILTER_1111, relu)
            c.conv_block(channels['c1'], CONV_FILTER_3311, relu)

            c0 = Stacker(c.last_layer, name='branch_C0')
            c0.conv_block(channels['c2-0'], CONV_FILTER_1311, relu)
            c1 = Stacker(c.last_layer, name='branch_C1')
            c1.conv_block(channels['c2-1'], CONV_FILTER_3111, relu)

            d = Stacker(x, name='branch_D')
            d.max_pooling(CONV_FILTER_3311)
            d.conv_block(channels['d1'], CONV_FILTER_3311, relu)

            return concat(
                [a.last_layer, b0.last_layer, b1.last_layer, c0.last_layer,
                 c1.last_layer, d.last_layer],
                axis=3)
Example #29
def _residual_block_pre_activation(x,
                                   n_channel,
                                   filter_=(3, 3),
                                   name='residual_block_pre_activation'):
    with tf.variable_scope(name):
        stack = Stacker(x)

        stack.bn()
        stack.relu()
        stack.layers_conv2d(n_channel, filter_, (1, 1), 'SAME')

        stack.bn()
        stack.relu()
        stack.layers_conv2d(n_channel, filter_, (1, 1), 'SAME')

        stack.residual_add(x)

        return stack.last_layer
Example #30
    def aux(self, x, n_classes, name='aux'):
        with tf.variable_scope(name):
            stack = Stacker(x)
            stack.avg_pooling(CONV_FILTER_5533)
            stack.conv_block(self.n_channel * 8, CONV_FILTER_1111, relu)

            filter_ = list(stack.last_layer.shape[1:3]) + [1, 1]
            stack.conv_block(self.n_channel * 48, filter_, relu)
            stack.flatten()
            logit = stack.linear(n_classes)
            proba = stack.softmax()
            return logit, proba