def separable_conv_with_bn(x, f, stride=False, aspp=False, atrous_rate=1,
                           act_fn=True, last_block=False, end_point=False,
                           eps=1e-03, out=False, test=False, fix_params=False):
    """Depthwise-separable convolution: depthwise 3x3 conv + pointwise 1x1
    conv, each followed by batch normalization.

    Args:
        x: Input variable.
        f: Number of output channels of the pointwise convolution.
        stride (bool): If True, the depthwise conv downsamples with stride (2, 2).
        aspp (bool): If True (and `stride` is False), use atrous (dilated)
            depthwise convolution with rate `atrous_rate`.
        atrous_rate (int): Dilation rate used when `aspp` is True.
        act_fn (bool): If True, apply ReLU after the pointwise conv + BN.
        last_block (bool): If True, apply ReLU after the depthwise conv + BN.
        end_point (bool): If True, store the pointwise BN output in the
            module-level `endpoints` dict under 'Decoder End Point 1'.
        eps (float): Epsilon for batch normalization.
        out: Unused; kept for interface compatibility.
        test (bool): If True, BN runs in inference mode (stored statistics).
        fix_params (bool): If True, parameters are excluded from updates.

    Returns:
        Output variable of the separable convolution.
    """
    with nn.parameter_scope("depthwise"):
        # Exactly one depthwise variant is selected: strided (downsampling),
        # atrous (ASPP), or plain 3x3.
        if stride:
            h = PF.depthwise_convolution(x, (3, 3), stride=(2, 2), pad=(1, 1),
                                         with_bias=False,
                                         fix_parameters=fix_params)
        elif aspp:
            h = PF.depthwise_convolution(x, (3, 3),
                                         pad=(atrous_rate, atrous_rate),
                                         stride=(1, 1),
                                         dilation=(atrous_rate, atrous_rate),
                                         with_bias=False,
                                         fix_parameters=fix_params)
        else:
            h = PF.depthwise_convolution(x, (3, 3), pad=(1, 1),
                                         with_bias=False,
                                         fix_parameters=fix_params)
        h = PF.batch_normalization(h, batch_stat=not test, eps=eps,
                                   fix_parameters=fix_params)
        if last_block:
            h = F.relu(h)
    with nn.parameter_scope("pointwise"):
        h = PF.convolution(h, f, (1, 1), stride=(1, 1), with_bias=False,
                           fix_parameters=fix_params)
        h = PF.batch_normalization(h, batch_stat=not test, eps=eps,
                                   fix_parameters=fix_params)
        if end_point:
            # NOTE(review): relies on a module-level `endpoints` dict defined
            # elsewhere in this file.
            global endpoints
            endpoints['Decoder End Point 1'] = h
        if act_fn:
            h = F.relu(h)
    return h
def shuffle_unit(x, scope_name, dn=False):
    """ShuffleNet unit: grouped 1x1 conv, channel shuffle, 3x3 depthwise
    conv, grouped 1x1 conv, then a shortcut connection.

    Figure. 2 (b) and (c) in https://arxiv.org/pdf/1707.01083.pdf

    NOTE(review): depends on module-level `groups`, `test` and `shuffle`
    being defined elsewhere in this file — confirm at call site.

    Args:
        x: Input variable (NCHW).
        scope_name: Parameter scope name for this unit.
        dn (bool): If True, downsample — the depthwise conv uses stride 2
            and the shortcut is average-pooled and concatenated instead of
            added.

    Returns:
        Output variable.
    """
    in_channels = x.shape[1]
    out = x
    with nn.parameter_scope(scope_name):
        with nn.parameter_scope("gconv1"):
            out = PF.convolution(out, in_channels, kernel=(1, 1), pad=(0, 0),
                                 group=groups, with_bias=False)
            out = PF.batch_normalization(out, batch_stat=not test)
            out = F.relu(out, True)
        with nn.parameter_scope("shuffle"):  # no meaning but semantics
            out = shuffle(out)
        with nn.parameter_scope("dconv"):
            if dn:
                dconv_stride = (2, 2)
            else:
                dconv_stride = (1, 1)
            out = PF.depthwise_convolution(out, kernel=(3, 3), pad=(1, 1),
                                           stride=dconv_stride,
                                           with_bias=False)
            out = PF.batch_normalization(out, batch_stat=not test)
        with nn.parameter_scope("gconv2"):
            out = PF.convolution(out, in_channels, kernel=(1, 1), pad=(0, 0),
                                 group=groups, with_bias=False)
            out = PF.batch_normalization(out, batch_stat=not test)
        # Shortcut: concat with the pooled input when downsampling
        # (doubles channels), otherwise a plain residual add.
        if dn:
            shortcut = F.average_pooling(x, (2, 2))
            out = F.concatenate(*[out, shortcut], axis=1)
        else:
            out = out + x
        out = F.relu(out)
    return out
def network(x, y, test=False):
    """Small binary classifier over a 3x64x64 input (generated topology).

    Args:
        x: Input variable of shape (batch, 3, 64, 64).
        y: Label variable; not used in the forward graph, kept so the
            generated-code interface stays unchanged.
        test (bool): If True, batch normalization uses stored statistics.

    Returns:
        Softmax output variable of shape (batch, 2).
    """
    # AveragePooling: 3,64,64 -> 3,12,21
    out = F.average_pooling(x, (5, 3), (5, 3))
    # LeakyReLU_2 (in-place)
    out = F.leaky_relu(out, 0.1, True)
    # Convolution_2: -> 20,13,21
    out = PF.convolution(out, 20, (2, 3), (1, 1), name='Convolution_2')
    # BatchNormalization over the channel axis
    out = PF.batch_normalization(
        out, (1, ), 0.9, 0.0001, not test, name='BatchNormalization')
    # ReLU (in-place)
    out = F.relu(out, True)
    # DepthwiseConvolution, 5x5 kernel
    out = PF.depthwise_convolution(
        out, (5, 5), (2, 2), name='DepthwiseConvolution')
    # MaxPooling_2: -> 20,6,7
    out = F.max_pooling(out, (2, 3), (2, 3))
    # LeakyReLU (in-place)
    out = F.leaky_relu(out, 0.1, True)
    # Affine: -> 2
    out = PF.affine(out, (2, ), name='Affine')
    # Softmax over the 2 classes
    out = F.softmax(out)
    return out
def test_FLOPsEstimator():
    """FLOPsEstimator should count exactly 17644 FLOPs for this graph."""
    inp = nn.Variable((1, 3, 12, 12))
    dw = PF.depthwise_convolution(inp, kernel=(5, 5), with_bias=True)
    normed = PF.fused_batch_normalization(dw)
    # hard-sigmoid-like branch: affine -> +3 -> sigmoid -> relu6
    act = F.relu6(F.sigmoid(PF.affine(normed, (3, 3), base_axis=2) + 3))
    pooled = F.global_average_pooling(act)
    estimator = FLOPsEstimator()
    assert estimator.predict(pooled) == 17644
def depthwise_separable_conv5x5(x, output_filter, scope, test):
    """Depthwise-separable convolution with a 5x5 depthwise kernel.

    Applies a 1x1 conv (via `conv1x1`), a strided 5x5 depthwise conv,
    a 1x1 pointwise conv, batch normalization, and ReLU.

    NOTE(review): `conv1x1` receives `scope` while we are already inside
    `nn.parameter_scope(scope)` — confirm the intended scope nesting.

    Args:
        x: Input variable.
        output_filter: Number of output channels.
        scope: Parameter scope name.
        test: If True, batch normalization uses stored statistics.

    Returns:
        Output variable.
    """
    with nn.parameter_scope(scope):
        out = conv1x1(x, output_filter, scope, test)
        out = PF.depthwise_convolution(out, (5, 5), (2, 2), with_bias=False)
        out = PF.convolution(out, output_filter, (1, 1), with_bias=False)
        out = PF.batch_normalization(out, batch_stat=not test)
        out = F.relu(out)
    return out
def test_parametric_function_2d(inshape, kernel, multiplier, outshape):
    """depthwise_convolution should yield `outshape` and register W/b
    parameters whose shapes reflect the channel multiplier."""
    base_axis = len(inshape) - 3
    in_channels = inshape[base_axis]
    expected_channels = in_channels * multiplier
    x = nn.Variable(inshape)
    y = PF.depthwise_convolution(x, kernel, multiplier=multiplier)
    params = nn.get_parameters()
    assert y.shape == outshape
    assert params['depthwise_conv/W'].shape == (expected_channels,) + kernel
    assert params['depthwise_conv/b'].shape == (expected_channels,)
    # Clean up global parameter registry so other tests start fresh.
    nn.clear_parameters()