Example #1
0
    def __build(self):
        """Assemble the ShuffleNet graph: channel preprocessing, a strided
        conv/max-pool stem, three shuffle stages, global average pooling and
        a 1x1-conv classifier head whose output is flattened into logits."""
        self.__init_global_epoch()
        self.__init_global_step()
        self.__init_input()

        # Mean-subtract and rescale each channel, reassembling them in BGR order.
        with tf.name_scope('Preprocessing'):
            red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
            normalized = tf.concat(
                [tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
                 tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
                 tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER],
                3)

        # Stem: explicit 1-pixel symmetric padding, then a strided 3x3 conv.
        stem_input = tf.pad(normalized, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        stem = conv2d('conv1', x=stem_input, w=None,
                      num_filters=self.output_channels['conv1'],
                      kernel_size=(3, 3), stride=(2, 2),
                      l2_strength=self.args.l2_strength, bias=self.args.bias,
                      batchnorm_enabled=self.args.batchnorm_enabled,
                      is_training=self.is_training,
                      activation=tf.nn.relu, padding='VALID')

        # Asymmetric (bottom/right) padding before the 3x3 stride-2 max pool.
        pool_input = tf.pad(stem, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
        pooled = max_pool_2d(pool_input, size=(3, 3), stride=(2, 2), name='max_pool')

        # The three shuffle stages; each downsamples once, then repeats units.
        features = self.__stage(pooled, stage=2, repeat=3)
        features = self.__stage(features, stage=3, repeat=7)
        features = self.__stage(features, stage=4, repeat=3)

        # Global average pooling followed by a 1x1 conv acting as the classifier.
        gap = avg_pool_2d(features, size=(7, 7), stride=(1, 1),
                          name='global_pool', padding='VALID')
        class_maps = conv2d('fc', gap, w=None, num_filters=self.args.num_classes,
                            kernel_size=(1, 1),
                            l2_strength=self.args.l2_strength,
                            bias=self.args.bias,
                            is_training=self.is_training)
        self.logits = flatten(class_maps)

        self.__init_output()
Example #2
0
    def __build(self):
        """Build the model graph end to end: preprocess the RGB input,
        apply the conv1/max-pool stem, run shuffle stages 2-4, and turn the
        globally-pooled features into per-class logits via a 1x1 conv."""
        self.__init_global_epoch()
        self.__init_global_step()
        self.__init_input()

        with tf.name_scope('Preprocessing'):
            # Per-channel mean subtraction and scaling; channels rejoined as BGR.
            red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
            bgr = tf.concat([
                tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
                tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
                tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER,
            ], 3)

        # Pad one pixel on every side so the VALID stride-2 conv behaves like SAME.
        x_pad = tf.pad(bgr, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        c1 = conv2d('conv1', x=x_pad, w=None, num_filters=self.output_channels['conv1'],
                    kernel_size=(3, 3), stride=(2, 2),
                    l2_strength=self.args.l2_strength, bias=self.args.bias,
                    batchnorm_enabled=self.args.batchnorm_enabled,
                    is_training=self.is_training,
                    activation=tf.nn.relu, padding='VALID')

        # Bottom/right padding only, then a 3x3 stride-2 max pool.
        c1_pad = tf.pad(c1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
        mp = max_pool_2d(c1_pad, size=(3, 3), stride=(2, 2), name='max_pool')

        # Shuffle stages with the standard 3/7/3 repeat pattern.
        s2 = self.__stage(mp, stage=2, repeat=3)
        s3 = self.__stage(s2, stage=3, repeat=7)
        s4 = self.__stage(s3, stage=4, repeat=3)

        pool_out = avg_pool_2d(s4, size=(7, 7), stride=(1, 1),
                               name='global_pool', padding='VALID')

        # 1x1 conv as the fully-connected classifier; flatten to final logits.
        logits_4d = conv2d('fc', pool_out, w=None,
                           num_filters=self.args.num_classes,
                           kernel_size=(1, 1),
                           l2_strength=self.args.l2_strength,
                           bias=self.args.bias,
                           is_training=self.is_training)
        self.logits = flatten(logits_4d)

        self.__init_output()
Example #3
0
 def __build(self):
     """Build the full ShuffleNet graph: preprocessing, conv/pool stem,
     shuffle stages 2-4, global pooling and the 1x1-conv classifier head."""
     self.__init_global_epoch()
     self.__init_global_step()
     self.__init_input()
     # 0. Image preprocessing: subtract the mean, multiply by the normalizer ####
     with tf.name_scope('Preprocessing'):
         # Split the input into its three channels
         red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
         # Subtract the mean from each channel, scale it, then concat/merge
         # the channels back together (note the BGR order of the result)
         preprocessed_input = tf.concat([
             tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
             tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
             tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER,
         ], 3)
     # 1. conv1: 3x3x3x24 convolution, stride 2, BN, ReLU #######################
     ######## Pad the borders by one pixel on each side
     x_padded = tf.pad(preprocessed_input, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
     ######## conv
     conv1 = conv2d('conv1', x=x_padded, w=None, num_filters=self.output_channels['conv1'], kernel_size=(3, 3),
                    stride=(2, 2), l2_strength=self.args.l2_strength, bias=self.args.bias,
                    batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training,
                    activation=tf.nn.relu, padding='VALID')
     # 2. Max pooling, 3x3 kernel, stride 2 (bottom/right padding only) #########
     padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
     max_pool = max_pool_2d(padded, size=(3, 3), stride=(2, 2), name='max_pool')
     # 3. One stride-2 non-grouped pointwise-conv concat (channel-expansion)
     #    module, then 3 stride-1 add (channel-sum) modules
     stage2 = self.__stage(max_pool, stage=2, repeat=3)
     # 4. One stride-2 grouped pointwise-conv concat (channel-expansion)
     #    module, then 7 stride-1 add (channel-sum) modules
     stage3 = self.__stage(stage2, stage=3, repeat=7)
     # 5. One stride-2 grouped pointwise-conv concat (channel-expansion)
     #    module, then 3 stride-1 add (channel-sum) modules
     stage4 = self.__stage(stage3, stage=4, repeat=3)
     # 6. Global average pooling, 7x7 kernel, stride 1
     global_pool = avg_pool_2d(stage4, size=(7, 7), stride=(1, 1), name='global_pool', padding='VALID')
     # 7. 1x1 pointwise conv producing num_classes feature maps
     logits_unflattened = conv2d('fc', global_pool, w=None, num_filters=self.args.num_classes,
                                 kernel_size=(1, 1),  # 1x1 pointwise convolution
                                 l2_strength=self.args.l2_strength,
                                 bias=self.args.bias,
                                 is_training=self.is_training)
     # 8. Flatten to one dimension
     self.logits = flatten(logits_unflattened)
     # 9. Build the output/loss ops
     self.__init_output()
Example #4
0
    def __build(self):
        """Construct the network: resize the input, run the conv/max-pool
        stem and shuffle stages 2-4, global-average-pool the features, and
        classify with a dense layer producing one logit per class."""
        self.__init_global_epoch()
        self.__init_global_step()
        self.__init_input()

        resized = self.__resize(self.X)
        stem = conv2d('conv1', x=resized, w=None,
                      num_filters=self.output_channels['conv1'],
                      kernel_size=(3, 3), stride=(2, 2),
                      l2_strength=self.args.l2_strength, bias=self.args.bias,
                      batchnorm_enabled=self.args.batchnorm_enabled,
                      is_training=self.is_training)

        # One-pixel symmetric padding before the 3x3 stride-2 max pool.
        stem_padded = tf.pad(stem, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        pooled = max_pool_2d(stem_padded, size=(3, 3), stride=(2, 2), name='max_pool')

        # Shuffle stages with the standard 3/7/3 repeat pattern.
        features = self.__stage(pooled, stage=2, repeat=3)
        features = self.__stage(features, stage=3, repeat=7)
        features = self.__stage(features, stage=4, repeat=3)

        gap = avg_pool_2d(features, size=(7, 7), stride=(1, 1), name='global_pool')
        flat = flatten(gap)

        # Dense classifier head on the flattened pooled features.
        self.logits = dense('fc', flat, w=None, output_dim=self.args.num_classes,
                            l2_strength=self.args.l2_strength,
                            bias=self.args.bias,
                            is_training=self.is_training)
        self.__init_output()