Example #1
    def infer(self, inputs, n, is_training, reuse=False):
        with tf.variable_scope('stage0', reuse=reuse):
            x = layer.conv_bn_relu(inputs, [3, 3, 3, 16],
                                   1,
                                   is_training,
                                   name='stage0')
        # Stage 1: n residual blocks at 16 channels (the first block is flagged with first_block=True)
        for i in range(n):
            with tf.variable_scope('stage1_res_%d' % i, reuse=reuse):
                if i == 0:
                    x = layer.res_block(x,
                                        16,
                                        is_training,
                                        name='stage1_res%d' % i,
                                        first_block=True)
                else:
                    x = layer.res_block(x,
                                        16,
                                        is_training,
                                        name='stage1_res%d' % i)

        # Stage 2: n residual blocks at 32 channels
        for i in range(n):
            with tf.variable_scope('stage2_res_%d' % i, reuse=reuse):
                x = layer.res_block(x,
                                    32,
                                    is_training,
                                    name='stage2_res%d' % i)
        # Stage 3: n residual blocks at 64 channels
        for i in range(n):
            with tf.variable_scope('stage3_res_%d' % i, reuse=reuse):
                x = layer.res_block(x,
                                    64,
                                    is_training,
                                    name='stage3_res%d' % i)

        # Head: batch norm + ReLU, 8x8 global average pooling, then a 10-way fully connected layer
        with tf.variable_scope('fc', reuse=reuse):
            x = layer.batchNorm(x, is_training, 'fc_batchNorm')
            x = tf.nn.relu(x)
            feature = tf.reshape(layer.avgpool(x, 8, 8,
                                               name='global_avg_pool'),
                                 shape=(-1, 64))
            x = layer.fc(feature, 10, name='fc')
        return x
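The layer helpers used in this example are not shown. Below is a minimal TF1-style sketch of the conv_bn_relu helper it assumes (convolution, batch normalization, ReLU), with the signature inferred from the call sites above; this is an illustration, not the original implementation:

import tensorflow as tf

def conv_bn_relu(x, kernel_shape, stride, is_training, name):
    # kernel_shape is [k_h, k_w, in_channels, out_channels]
    with tf.variable_scope(name):
        w = tf.get_variable('weights', kernel_shape,
                            initializer=tf.variance_scaling_initializer())
        x = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding='SAME')
        x = tf.layers.batch_normalization(x, training=is_training, name='bn')
        return tf.nn.relu(x)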
Example #2
def build_wide_resnet(x, num_classes, N, k, block, prob=None):
    channels = [3, 16, 16 * k, 32 * k, 64 * k]
    layers = []

    # conv1
    # conv1 = layer.bn_relu_conv(x, "conv1", channels[0], channels[1], 3)
    conv1 = layer.conv_bn_relu(x, "conv1", channels[0], channels[1], 3)
    layers.append(conv1)

    # conv2
    # 1st
    before20 = layers[-1]
    conv20 = layer.conv_layer(before20, "conv20", [1, 1, channels[1], channels[2]])
    # conv20b = block(before20, "conv20b", prob, channels[1], channels[2]) if block is dropout else block(before20, "conv20b", channels[1], channels[2])
    conv20b_ = layer.conv_bn_relu(before20, "conv20b_", channels[1], channels[2], 3)
    conv20b = layer.conv_layer(conv20b_, "conv20b", [3, 3, channels[2], channels[2]])
    output20 = layer.bn_relu(conv20 + conv20b, "output20")
    layers.append(output20)

    # others
    for n in range(1, N):
        before2n = tf.identity(layers[-1])
        # conv2n = layer.conv_layer(before2n, "conv2%d" % n, [3, 3, channels[2], channels[2]])
        conv2nb = (block(layers[-1], "conv2%db" % n, prob, channels[2], channels[2])
                   if block is dropout
                   else block(layers[-1], "conv2%db" % n, channels[2], channels[2]))
        output2n = layer.bn_relu(before2n + conv2nb, "output2%d" % n)
        layers.append(output2n)

    # downsampling0
    #downsampling0 = layer.avg_pool_layer(layers[-1], "downsampling0", [1, 2, 2, 1])
    downsampling0 = layer.max_pool_layer(layers[-1], "downsampling0", [1, 2, 2, 1])
    layers.append(downsampling0)

    # conv3
    # 1st
    before30 = layers[-1]
    conv30 = layer.conv_layer(before30, "conv30", [1, 1, channels[2], channels[3]])
    # conv30b = block(before30, "conv30b", prob, channels[2], channels[3]) if block is dropout else block(before30, "conv30b", channels[2], channels[3])
    conv30b_ = layer.conv_bn_relu(before30, "conv30b_", channels[2], channels[3], 3)
    conv30b = layer.conv_layer(conv30b_, "conv30b", [3, 3, channels[3], channels[3]])
    output30 = layer.bn_relu(conv30 + conv30b, "output30")
    layers.append(output30)

    # others
    for n in range(1, N):
        before3n = tf.identity(layers[-1])
        # conv3n = layer.conv_layer(before3n, "conv3%d" % n, [3, 3, channels[3], channels[3]])
        conv3nb = (block(layers[-1], "conv3%db" % n, prob, channels[3], channels[3])
                   if block is dropout
                   else block(layers[-1], "conv3%db" % n, channels[3], channels[3]))
        output3n = layer.bn_relu(before3n + conv3nb, "output3%d" % n)
        layers.append(output3n)

    # downsampling1
    #downsampling1 = layer.avg_pool_layer(layers[-1], "downsampling1", [1, 2, 2, 1])
    downsampling1 = layer.max_pool_layer(layers[-1], "downsampling1", [1, 2, 2, 1])
    layers.append(downsampling1)

    # conv4
    # 1st
    before40 = layers[-1]
    conv40 = layer.conv_layer(before40, "conv40", [1, 1, channels[3], channels[4]])
    # conv40b = block(before40, "conv40b", prob, channels[3], channels[4]) if block is dropout else block(before40, "conv40b", channels[3], channels[4])
    conv40b_ = layer.conv_bn_relu(before40, "conv40b_", channels[3], channels[4], 3)
    conv40b = layer.conv_layer(conv40b_, "conv40b", [3, 3, channels[4], channels[4]])
    output40 = layer.bn_relu(conv40 + conv40b, "output40")
    layers.append(output40)

    # others
    for n in range(1, N):
        before4n = tf.identity(layers[-1])
        # conv4n = layer.conv_layer(before4n, "conv4%d" % n, [3, 3, channels[4], channels[4]])
        conv4nb = (block(layers[-1], "conv4%db" % n, prob, channels[4], channels[4])
                   if block is dropout
                   else block(layers[-1], "conv4%db" % n, channels[4], channels[4]))
        output4n = layer.bn_relu(before4n + conv4nb, "output4%d" % n)
        layers.append(output4n)

    # avg pooling
    avg_pool = layer.avg_pool_layer(layers[-1], name="avg_pool", pooling_size=[1, 8, 8, 1])
    layers.append(avg_pool)

    # flatten and fully connected
    flatten = layer.flatten_layer(layers[-1])
    fc = layer.fc_layer(flatten, num_classes, "fc")
    layers.append(fc)
    
    sm = tf.nn.softmax(layers[-1], name="prediction")
    layers.append(sm)

    return layers[-1]
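The dropout block that build_wide_resnet compares against (block is dropout) is not shown. Below is a hedged sketch of one plausible residual branch with the signature the call sites imply (x, name, keep probability, in/out channels); its internal structure is an assumption:

import tensorflow as tf

def dropout(x, name, prob, in_channels, out_channels):
    # conv-bn-relu, dropout on the intermediate activation, then a plain 3x3 conv;
    # prob is assumed to be the keep probability.
    f = layer.conv_bn_relu(x, name + '_a', in_channels, out_channels, 3)
    f = tf.nn.dropout(f, keep_prob=prob)
    return layer.conv_layer(f, name + '_b', [3, 3, out_channels, out_channels])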
Example #3
def inference(images,
              phase_train=False,
              has_bn=True,
              image_norm=True,
              qactivation=False,
              qweight=False,
              scale=None):
    images = tf.cast(images, dtype=cfg.dtype)
    if image_norm:
        mean = np.reshape(np.array(cfg.image_mean), (1, 1, 1, 3))
        std = np.reshape(np.array(cfg.image_std), (1, 1, 1, 3))
        images = (images - mean) / std
    else:
        images = images - 128

    # Width multiplier: channel counts below are scaled by alpha and rounded to a multiple of 8.
    alpha = 0.35
    first_block_filters = _make_divisible(32 * alpha, 8)
    f, f_float = conv_bn_relu(images,
                              None,
                              first_block_filters,
                              3,
                              2,
                              qweight=qweight,
                              qactivation=qactivation,
                              scale=scale,
                              has_bn=has_bn,
                              has_relu=True,
                              phase_train=phase_train,
                              scope=cfg.first_conv_name)
    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(16 * alpha),
                                       3,
                                       1,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=0)

    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(24 * alpha),
                                       3,
                                       2,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=1)
    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(24 * alpha),
                                       3,
                                       1,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=2)

    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(32 * alpha),
                                       3,
                                       2,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=3)
    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(32 * alpha),
                                       3,
                                       1,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=4)
    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(32 * alpha),
                                       3,
                                       1,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=5)

    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(64 * alpha),
                                       3,
                                       2,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=6)
    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(64 * alpha),
                                       3,
                                       1,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=7)
    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(64 * alpha),
                                       3,
                                       1,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=8)
    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(64 * alpha),
                                       3,
                                       1,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=9)

    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(96 * alpha),
                                       3,
                                       1,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=10)
    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(96 * alpha),
                                       3,
                                       1,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=11)
    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(96 * alpha),
                                       3,
                                       1,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=12)

    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(160 * alpha),
                                       3,
                                       2,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=13)
    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(160 * alpha),
                                       3,
                                       1,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=14)
    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(160 * alpha),
                                       3,
                                       1,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=15)

    f, f_float = create_separable_conv(f,
                                       f_float,
                                       int(320 * alpha),
                                       3,
                                       1,
                                       qweight=qweight,
                                       qactivation=qactivation,
                                       scale=scale,
                                       has_bn=has_bn,
                                       has_relu=True,
                                       phase_train=phase_train,
                                       block_id=16)

    if alpha > 1.0:
        last_block_filters = _make_divisible(1280 * alpha, 8)
    else:
        last_block_filters = 1280

    f, f_float = conv_bn_relu(f,
                              f_float,
                              last_block_filters,
                              1,
                              1,
                              qweight=qweight,
                              qactivation=qactivation,
                              scale=scale,
                              has_bn=has_bn,
                              has_relu=True,
                              phase_train=phase_train,
                              scope='conv_1')

    f, f_float = conv_bn_relu(f,
                              f_float,
                              1000,
                              1,
                              stride=1,
                              padding='SAME',
                              qweight=qweight,
                              qactivation=False,
                              scale=scale,
                              has_bn=False,
                              has_relu=False,
                              phase_train=phase_train,
                              scope='prediction')
    f = tf.reduce_mean(f, axis=[1, 2], keepdims=False)
    if cfg.dtype == 'float16':
        f = tf.cast(f, dtype='float32')

    return f
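The _make_divisible helper is not defined in this example. The version commonly paired with width-multiplier networks of this kind rounds a channel count to a multiple of divisor without shrinking it by more than 10%; the example's own definition may differ:

def _make_divisible(v, divisor, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure rounding down does not drop the value by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v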
Example #4
    def __build_network(self):
        with tf.name_scope('x'):
            self.raw_x = tf.placeholder(tf.float32, [None, 32, 32, 3], 'x')
            self.batch_size = tf.shape(self.raw_x, out_type=tf.int32)[0]
            if self.phase == 'TRAIN':
                # Training-time augmentation: random horizontal flip, 4-pixel zero
                # padding, then a random 32x32 crop.
                self.x = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), self.raw_x)
                self.x = tf.pad(self.x, [[0, 0], [4, 4], [4, 4], [0, 0]], 'CONSTANT')
                self.x = tf.map_fn(lambda img: tf.random_crop(img, [32, 32, 3]), self.x)
            else:
                self.x = self.raw_x
        
        self.conv1_1 = conv_bn_relu('conv1_1', self.x, self.dim[0], self.phase, self.reg, dropout=self.dropout[0], if_bn=self.if_bn)
        self.conv1_2 = conv_bn_relu('conv1_2', self.conv1_1, self.dim[1], self.phase, self.reg, dropout=self.dropout[1], if_bn=self.if_bn)
        self.pool1 = tf.nn.max_pool(self.conv1_2, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name='pool1')

        self.conv2_1 = conv_bn_relu('conv2_1', self.pool1, self.dim[2], self.phase, self.reg, dropout=self.dropout[2], if_bn=self.if_bn)
        self.conv2_2 = conv_bn_relu('conv2_2', self.conv2_1, self.dim[3], self.phase, self.reg, dropout=self.dropout[3], if_bn=self.if_bn)
        self.pool2 = tf.nn.max_pool(self.conv2_2, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name='pool2')

        self.conv3_1 = conv_bn_relu('conv3_1', self.pool2, self.dim[4], self.phase, self.reg, dropout=self.dropout[4], if_bn=self.if_bn)
        self.conv3_2 = conv_bn_relu('conv3_2', self.conv3_1, self.dim[5], self.phase, self.reg, dropout=self.dropout[5], if_bn=self.if_bn)
        self.conv3_3 = conv_bn_relu('conv3_3', self.conv3_2, self.dim[6], self.phase, self.reg, dropout=self.dropout[6], if_bn=self.if_bn)
        self.pool3 = tf.nn.max_pool(self.conv3_3, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name='pool3')

        self.conv4_1 = conv_bn_relu('conv4_1', self.pool3, self.dim[7], self.phase, self.reg, dropout=self.dropout[7], if_bn=self.if_bn)
        self.conv4_2 = conv_bn_relu('conv4_2', self.conv4_1, self.dim[8], self.phase, self.reg, dropout=self.dropout[8], if_bn=self.if_bn)
        self.conv4_3 = conv_bn_relu('conv4_3', self.conv4_2, self.dim[9], self.phase, self.reg, dropout=self.dropout[9], if_bn=self.if_bn)
        self.pool4 = tf.nn.max_pool(self.conv4_3, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name='pool4')

        self.conv5_1 = conv_bn_relu('conv5_1', self.pool4, self.dim[10], self.phase, self.reg, dropout=self.dropout[10], if_bn=self.if_bn)
        self.conv5_2 = conv_bn_relu('conv5_2', self.conv5_1, self.dim[11], self.phase, self.reg, dropout=self.dropout[11], if_bn=self.if_bn)
        self.conv5_3 = conv_bn_relu('conv5_3', self.conv5_2, self.dim[12], self.phase, self.reg, dropout=self.dropout[12], if_bn=self.if_bn)
        self.pool5 = tf.nn.max_pool(self.conv5_3, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME', name='pool5')

        self.fc0 = layers.flatten(self.pool5)
        if self.dropout[13] is not None:
            if self.phase == 'TRAIN':
                self.fc0_dropout = tf.nn.dropout(self.fc0, self.dropout[13], name='fc0_dropout')
            else:
                # tf.nn.dropout already scales kept units by 1/keep_prob during training,
                # so no extra rescaling is needed at test time.
                self.fc0_dropout = tf.identity(self.fc0, name='fc0_dropout')
        else:
            self.fc0_dropout = self.fc0

        self.fc1 = fc_bn_relu('fc6', self.fc0_dropout, self.dim[13], self.phase, self.reg, dropout=self.dropout[14], if_bn=self.if_bn)
        self.fc2 = fc_bn_relu('fc7', self.fc1, self.dim[14], self.phase, self.reg, dropout=self.dropout[15], if_bn=self.if_bn)
        
        with tf.variable_scope('y_w'):
            w = tf.get_variable('w', [self.dim[14], 10], tf.float32, layers.xavier_initializer(), self.reg)
            b = tf.get_variable('b', [10], tf.float32, tf.zeros_initializer())
        with tf.name_scope('y'):
            self.y_hat_logit = tf.nn.bias_add(tf.matmul(self.fc2, w), b, name='y_hat_logit')
            self.y_hat = tf.arg_max(self.y_hat_logit, -1, tf.int32, 'y_hat')
            self.y_logit = tf.placeholder(tf.float32, [None, 10], 'y_logit')
            self.y = tf.arg_max(self.y_logit, -1, tf.int32, 'y')
        with tf.name_scope('loss'):           
            self.loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=self.y_logit, logits=self.y_hat_logit)) / tf.cast(self.batch_size, tf.float32)
        with tf.name_scope('accuracy'):
            self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.y_hat, self.y), tf.float32))

        tf.summary.scalar('ce_loss', self.loss)
        tf.summary.scalar('loss', self.loss)
        self.summary = tf.summary.merge_all()
        
        with tf.name_scope('optimizer'):
            self.lr = tf.placeholder(tf.float32, [], 'lr')
            self.global_step = tf.get_variable('global_step', [], tf.float32, tf.zeros_initializer(), trainable=False)
#            self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.loss, global_step=self.global_step)
            self.optimizer = tf.train.MomentumOptimizer(self.lr, momentum=0.9).minimize(self.loss, global_step=self.global_step)
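A hedged sketch of one training step against this graph; model, batch_images, and batch_labels are assumed names, not part of the original class:

import tensorflow as tf

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, step_loss = sess.run(
        [model.optimizer, model.loss],
        feed_dict={model.raw_x: batch_images,    # float32, shape (batch, 32, 32, 3)
                   model.y_logit: batch_labels,  # one-hot labels, shape (batch, 10)
                   model.lr: 0.1})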
Example #5
def create_separable_conv(
    x,
    x_float,
    out_channels,
    ksize,
    stride=1,
    qweight=False,
    qactivation=False,
    scale=None,
    has_bn=True,
    has_relu=True,
    phase_train=False,
    block_id=0,
):
    in_channels = x.shape.as_list()[3]
    depthwise_filters = in_channels
    pointwise_filters = _make_divisible(out_channels, 8)
    prefix = 'block_{}_'.format(block_id)

    f = x
    f_float = x_float
    if block_id:
        # Expand
        f, f_float = conv_bn_relu(x,
                                  x_float,
                                  in_channels * 6,
                                  1,
                                  stride=1,
                                  qweight=qweight,
                                  qactivation=qactivation,
                                  padding='SAME',
                                  scale=scale,
                                  has_bn=has_bn,
                                  has_relu=has_relu,
                                  phase_train=phase_train,
                                  scope=prefix + 'expand')
        depthwise_filters = in_channels * 6
    # Depthwise convolution (groups == depthwise_filters)
    f, f_float = conv_bn_relu(f,
                              f_float,
                              depthwise_filters,
                              ksize,
                              stride=stride,
                              qweight=qweight,
                              qactivation=qactivation,
                              padding='SAME',
                              groups=depthwise_filters,
                              scale=scale,
                              has_bn=has_bn,
                              has_relu=has_relu,
                              phase_train=phase_train,
                              scope=prefix + 'depthwise')
    # Pointwise projection back down to pointwise_filters (no ReLU)
    f, f_float = conv_bn_relu(f,
                              f_float,
                              pointwise_filters,
                              1,
                              stride=1,
                              qweight=qweight,
                              qactivation=qactivation,
                              padding='SAME',
                              scale=scale,
                              has_bn=has_bn,
                              has_relu=False,
                              phase_train=phase_train,
                              scope=prefix + 'project')

    # Residual shortcut when the input and output shapes match
    if in_channels == pointwise_filters and stride == 1:
        f, f_float = add(f,
                         f_float,
                         x,
                         x_float,
                         phase_train,
                         qactivation=qactivation,
                         scope=prefix + 'add',
                         scale=scale)
    return f, f_float
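A hedged usage sketch mirroring the inference example above, assuming f and f_float come from an initial conv_bn_relu stem: block_id=0 skips the sixfold expansion, while later blocks expand, run the depthwise convolution, project back down without a ReLU, and add a shortcut when the stride is 1 and the channel counts match.

f, f_float = create_separable_conv(f, f_float, 16, 3, stride=1, block_id=0)
f, f_float = create_separable_conv(f, f_float, 24, 3, stride=2, block_id=1)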