def layer4_1(x):
    """Decoder stage 4: two reflect-padded 3x3 convs (instance norm + PReLU
    after each), followed by a 2x nearest-neighbor upsample.

    Channel progression is 256 -> 64; parameter names are pinned so the
    block stays weight-compatible with pretrained checkpoints.
    """
    h = F.pad(x, (1, 1, 1, 1), 'reflect')
    h = PF.convolution(h, 256, kernel=(3, 3), stride=(1, 1),
                       name='layer4_1.1')
    h = F.instance_normalization(h, gamma=None, beta=None, channel_axis=1)
    h = PF.prelu(h, name='layer4_1.3')
    h = F.pad(h, (1, 1, 1, 1), 'reflect')
    h = PF.convolution(h, 64, kernel=(3, 3), stride=(1, 1),
                       name='layer4_1.5')
    h = F.instance_normalization(h, gamma=None, beta=None, channel_axis=1)
    h = PF.prelu(h, name='layer4_1.7')
    # Double spatial resolution before handing off to the next stage.
    return F.interpolate(h, scale=(2, 2), mode='nearest',
                         align_corners=False)
def layer3_1(x):
    """Decoder stage 3: two reflect-padded 3x3 convs, each followed by
    instance normalization and PReLU.

    Channel progression is 128 -> 64; parameter names are pinned so the
    block stays weight-compatible with pretrained checkpoints.
    """
    h = F.pad(x, (1, 1, 1, 1), 'reflect')
    h = PF.convolution(h, 128, kernel=(3, 3), stride=(1, 1),
                       name='layer3_1.1')
    h = F.instance_normalization(h, gamma=None, beta=None, channel_axis=1)
    h = PF.prelu(h, name='layer3_1.3')
    h = F.pad(h, (1, 1, 1, 1), 'reflect')
    h = PF.convolution(h, 64, kernel=(3, 3), stride=(1, 1),
                       name='layer3_1.5')
    h = F.instance_normalization(h, gamma=None, beta=None, channel_axis=1)
    return PF.prelu(h, name='layer3_1.7')
def res_block(x, out_ch, name):
    """Residual block: two reflect-padded 3x3 convs with instance norm,
    PReLU between them, skip connection added before the final PReLU.

    Args:
        x: input Variable; its channel count must equal out_ch so the
           skip addition is well-formed.
        out_ch: output channels of both convolutions.
        name: parameter scope for this block's weights.
    """
    with nn.parameter_scope(name):
        shortcut = x
        h = F.pad(x, (1, 1, 1, 1), 'reflect')
        h = PF.convolution(h, out_ch, kernel=(3, 3), stride=(1, 1),
                           name='conv1')
        h = F.instance_normalization(h, gamma=None, beta=None,
                                     channel_axis=1)
        # NOTE: both PReLU calls below use the default parameter name
        # inside this scope, so they share one slope parameter — kept
        # as-is to preserve the original parameter layout.
        h = PF.prelu(h)
        h = F.pad(h, (1, 1, 1, 1), 'reflect')
        h = PF.convolution(h, out_ch, kernel=(3, 3), stride=(1, 1),
                           name='conv2')
        h = F.instance_normalization(h, gamma=None, beta=None,
                                     channel_axis=1)
        h = h + shortcut
        return PF.prelu(h)
def _normalize(x, norm_type, channel_axis=1):
    """Apply parameter-free normalization to x.

    Args:
        x: input Variable.
        norm_type: "in" for instance normalization or "bn" for batch
            normalization (case-insensitive).
        channel_axis: axis treated as the channel dimension (default 1).

    Returns:
        The normalized Variable.

    Raises:
        ValueError: if norm_type is neither "in" nor "bn".
    """
    # Lower-case once instead of per branch.
    kind = norm_type.lower()
    if kind == "in":
        return F.instance_normalization(
            x, gamma=None, beta=None, channel_axis=channel_axis)
    if kind == "bn":
        # F.batch_normalization takes `axes` as a list of ints (its
        # default is [1]); wrap the scalar channel_axis accordingly.
        return F.batch_normalization(
            x, gamma=None, beta=None, mean=None, variance=None,
            axes=[channel_axis])
    raise ValueError("unknown norm_type: {}".format(norm_type))
def test_instance_normalization_forward_backward(seed, x_shape, batch_axis, channel_axis, output_stat):
    """Check F.instance_normalization against the NumPy reference.

    Runs forward and backward on random data and compares the output
    (and, when output_stat is set, the returned statistics) to
    ref_instance_normalization.
    """
    rng = np.random.RandomState(seed)
    data = np.array(rng.randn(*x_shape).astype(np.float32))
    eps = 1e-05

    # beta/gamma keep their full extent along batch and channel axes
    # and are broadcast (size 1) along every other axis.
    kept_axes = _force_list(batch_axis) + [channel_axis]
    stat_shape = tuple(
        dim if axis in kept_axes else 1
        for axis, dim in enumerate(x_shape))

    beta = rng.randn(*stat_shape).astype(np.float32)
    gamma = rng.randn(*stat_shape).astype(np.float32)

    x = nn.Variable.from_numpy_array(data)
    v_beta = nn.Variable.from_numpy_array(beta)
    v_gamma = nn.Variable.from_numpy_array(gamma)

    output = F.instance_normalization(
        x, v_beta, v_gamma, channel_axis, batch_axis, eps, output_stat)
    ref = ref_instance_normalization(
        data, beta, gamma, channel_axis, batch_axis, eps, output_stat)

    if output_stat:
        # Multiple outputs (y, mean, var): sink them to run one graph pass.
        sink = F.sink(*output)
        sink.forward()
        sink.backward()
        for out, expected in zip(output, ref):
            assert out.shape == expected.shape
            assert_allclose(out.d, expected, atol=1e-2, rtol=1e-5)
    else:
        output.forward()
        output.backward()
        assert output.shape == ref.shape
        assert_allclose(output.d, ref, atol=1e-2, rtol=1e-5)
def ins_norm(x):
    """Parameter-free instance normalization over channel axis 1."""
    return F.instance_normalization(x, gamma=None, beta=None, channel_axis=1)