    def test_infer(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
            scale = paddle.fluid.data('Scale', self.scale_np.shape,
                                      self.scale_np.dtype)
            bias = paddle.fluid.data('Bias', self.bias_np.shape,
                                     self.bias_np.dtype)
            mean = paddle.fluid.data('Mean', self.mean_np.shape,
                                     self.mean_np.dtype)
            variance = paddle.fluid.data('Variance', self.variance_np.shape,
                                         self.variance_np.dtype)
            # training=False: normalize with the provided running statistics.
            y = F.batch_norm(x, mean, variance, scale, bias, False,
                             self.momentum, self.epsilon, self.data_layout)
            exe = paddle.static.Executor(self.place)
            [y_np] = exe.run(
                feed={
                    'X': self.x_np,
                    'Scale': self.scale_np,
                    'Bias': self.bias_np,
                    'Mean': self.mean_np,
                    'Variance': self.variance_np,
                },
                fetch_list=[y])
        # Compare against the NumPy reference implementation.
        y_np_ref = ref_batch_norm_infer(self.x_np, self.scale_np,
                                        self.bias_np, self.mean_np,
                                        self.variance_np, self.momentum,
                                        self.epsilon, self.data_layout)
        self.assertTrue(np.allclose(y_np_ref, y_np))
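The reference helper ref_batch_norm_infer is defined elsewhere in the test file. A minimal NumPy sketch, assuming the standard inference formula y = (x - mean) / sqrt(variance + epsilon) * scale + bias and an 'NCHW'/'NHWC' data_layout switch, could look like:

import numpy as np

def ref_batch_norm_infer(x, scale, bias, mean, variance, momentum, epsilon,
                         data_layout):
    # Inference-mode batch norm: normalize with the supplied running
    # statistics, then apply the affine transform. `momentum` is unused
    # at inference time; it is kept only for signature parity.
    if data_layout == 'NCHW':
        x = np.transpose(x, (0, 2, 3, 1))  # to NHWC so (C,) stats broadcast
        y = (x - mean) / np.sqrt(variance + epsilon) * scale + bias
        return np.transpose(y, (0, 3, 1, 2))  # back to NCHW
    # NHWC: channels are already the last axis, broadcasting just works
    return (x - mean) / np.sqrt(variance + epsilon) * scale + bias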
Example No. 2
    def forward(self, input, time):
        self._check_data_format(self._data_format)
        self._check_input_dim(input)

        # Clamp the step index: steps past T_max reuse the last statistics.
        if time >= self.T_max:
            time = self.T_max - 1

        # Look up the running statistics registered for this time step.
        running_mean = getattr(self, 'running_mean_{}'.format(time))
        running_var = getattr(self, 'running_var_{}'.format(time))

        if self.training:
            warnings.warn(
                "When training, we now always track global mean and variance.")

        return F.batch_norm(
            input,
            running_mean,
            running_var,
            weight=self.weight,
            bias=self.bias,
            training=self.training,
            momentum=self._momentum,
            epsilon=self._epsilon,
            data_format=self._data_format,
        )
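For context: the forward above looks up per-time-step buffers by name, so they must have been registered under matching names. A minimal hypothetical skeleton of such a layer (the class name and constructor arguments are assumptions, not from the source):

import paddle

# Hypothetical skeleton matching the forward above: one running-stat buffer
# pair is registered per time step, under the names the getattr calls expect.
class TimeIndexedBatchNorm(paddle.nn.Layer):
    def __init__(self, num_features, T_max):
        super().__init__()
        self.T_max = T_max
        for t in range(T_max):
            self.register_buffer('running_mean_{}'.format(t),
                                 paddle.zeros([num_features]))
            self.register_buffer('running_var_{}'.format(t),
                                 paddle.ones([num_features]))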
Example No. 3
    def forward(self, input):
        self._check_data_format(self._data_format)
        self._check_input_dim(input)

        # Slice parameters and running statistics to the actual channel
        # count, so one parameter set serves inputs of varying width.
        feature_dim = int(input.shape[1])

        weight = self.weight[:feature_dim]
        bias = self.bias[:feature_dim]
        mean = self._mean[:feature_dim]
        variance = self._variance[:feature_dim]

        return F.batch_norm(
            input,
            mean,
            variance,
            weight=weight,
            bias=bias,
            training=self.training,
            momentum=self._momentum,
            epsilon=self._epsilon,
            data_format=self._data_format)
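The slicing lets one parameter set sized for the maximum width normalize narrower inputs, as in slimmable networks. A runnable illustration of the same trick with plain F.batch_norm (all shapes here are made up for the demo):

import paddle
import paddle.nn.functional as F

# Parameters sized for a maximum width of 64 channels also normalize a
# narrower 32-channel input once sliced to the active channel count.
max_ch = 64
weight = paddle.ones([max_ch])
bias = paddle.zeros([max_ch])
mean = paddle.zeros([max_ch])
variance = paddle.ones([max_ch])

x = paddle.randn([8, 32, 16, 16])  # only 32 of the 64 channels are active
c = x.shape[1]
y = F.batch_norm(x, mean[:c], variance[:c],
                 weight=weight[:c], bias=bias[:c], training=False)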
Example No. 4
    def forward(self, x, params=None, bn_training=True):
        """
        :param x: 输入图片
        :param params:
        :param bn_training: set False to not update
        :return: 输出分类
        """
        if params is None:
            params = self.vars

        weight, bias = params[0], params[1]  # 1st conv layer
        x = F.conv2d(x, weight, bias, stride=1, padding=1)
        weight, bias = params[2], params[3]  # 1st BN layer
        running_mean, running_var = self.vars_bn[0], self.vars_bn[1]
        x = F.batch_norm(x,
                         running_mean,
                         running_var,
                         weight=weight,
                         bias=bias,
                         training=bn_training)
        x = F.relu(x)  # 1st ReLU
        x = F.max_pool2d(x, kernel_size=2)  # 1st max-pool layer

        weight, bias = params[4], params[5]  # 2nd conv layer
        x = F.conv2d(x, weight, bias, stride=1, padding=1)
        weight, bias = params[6], params[7]  # 2nd BN layer
        running_mean, running_var = self.vars_bn[2], self.vars_bn[3]
        x = F.batch_norm(x,
                         running_mean,
                         running_var,
                         weight=weight,
                         bias=bias,
                         training=bn_training)
        x = F.relu(x)  # 2nd ReLU
        x = F.max_pool2d(x, kernel_size=2)  # 2nd max-pool layer

        weight, bias = params[8], params[9]  # 3rd conv layer
        x = F.conv2d(x, weight, bias, stride=1, padding=1)
        weight, bias = params[10], params[11]  # 3rd BN layer
        running_mean, running_var = self.vars_bn[4], self.vars_bn[5]
        x = F.batch_norm(x,
                         running_mean,
                         running_var,
                         weight=weight,
                         bias=bias,
                         training=bn_training)
        x = F.relu(x)  # 3rd ReLU
        x = F.max_pool2d(x, kernel_size=2)  # 3rd max-pool layer

        weight, bias = params[12], params[13]  # 4th conv layer
        x = F.conv2d(x, weight, bias, stride=1, padding=1)
        weight, bias = params[14], params[15]  # 4th BN layer
        running_mean, running_var = self.vars_bn[6], self.vars_bn[7]
        x = F.batch_norm(x,
                         running_mean,
                         running_var,
                         weight=weight,
                         bias=bias,
                         training=bn_training)
        x = F.relu(x)  # 4th ReLU
        x = F.max_pool2d(x, kernel_size=2)  # 4th max-pool layer

        x = paddle.reshape(x, [x.shape[0], -1])  # flatten
        weight, bias = params[-2], params[-1]  # linear
        x = F.linear(x, weight, bias)

        return x
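The forward above consumes params as a flat list (conv weight/bias and BN weight/bias for each of the four blocks, then the final linear weight/bias) and self.vars_bn as the matching running statistics, the usual layout for MAML-style functional forwards. A hypothetical sketch of building those lists (the helper name build_vars and all sizes are assumptions, not from the source):

import paddle

# Hypothetical builder for the flat parameter lists consumed above.
def build_vars(in_ch=1, ch=64, n_way=5, img_hw=28):
    vars, vars_bn = paddle.nn.ParameterList(), []
    c_in = in_ch
    for _ in range(4):
        vars.append(paddle.create_parameter([ch, c_in, 3, 3], 'float32'))  # conv weight
        vars.append(paddle.create_parameter([ch], 'float32'))              # conv bias
        vars.append(paddle.create_parameter([ch], 'float32'))              # BN weight
        vars.append(paddle.create_parameter([ch], 'float32'))              # BN bias
        vars_bn.extend([paddle.zeros([ch]), paddle.ones([ch])])            # running stats
        c_in = ch
    feat = ch * (img_hw // 2 ** 4) ** 2  # four 2x max-pools shrink H and W
    vars.append(paddle.create_parameter([feat, n_way], 'float32'))         # linear weight
    vars.append(paddle.create_parameter([n_way], 'float32'))               # linear bias
    return vars, vars_bn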