def net(self, higher_res_feature, lower_res_feature):
    # Feature fusion: upsample the low-resolution (context) branch to the
    # spatial size of the high-resolution (detail) branch before merging.
    h, w = higher_res_feature.shape[2:]
    lower_res_feature = fluid.layers.resize_bilinear(
        lower_res_feature, [h, w], align_mode=0)

    with scope('dwconv'):
        lower_res_feature = relu(
            bn(conv(lower_res_feature, self.out_channels, 1)))
    with scope('conv_lower_res'):
        lower_res_feature = bn(
            conv(lower_res_feature, self.out_channels, 1, bias_attr=True))
    with scope('conv_higher_res'):
        higher_res_feature = bn(
            conv(higher_res_feature, self.out_channels, 1, bias_attr=True))
    # Element-wise sum of the two branches, followed by ReLU.
    out = higher_res_feature + lower_res_feature
    return relu(out)
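
# Illustration (an assumption, not from this file): with Fast-SCNN's usual
# strides, `higher_res_feature` comes from the 1/8-resolution detail path and
# `lower_res_feature` from the 1/32-resolution context path, so the bilinear
# resize above upsamples the context features by 4x before fusion. The
# hypothetical helper below (unused by the model) shows the fusion arithmetic
# with plain NumPy, assuming both branches already share one NCHW shape.
def _fusion_sketch():
    import numpy as np
    higher = np.random.rand(1, 128, 64, 128).astype('float32')
    lower = np.random.rand(1, 128, 64, 128).astype('float32')
    # Element-wise sum followed by ReLU, mirroring net() above.
    return np.maximum(higher + lower, 0.0)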

def xception_block(self,
                   input,
                   channels,
                   strides=1,
                   filters=3,
                   dilation=1,
                   skip_conv=True,
                   has_skip=True,
                   activation_fn_in_separable_conv=False):
    # Xception block: three stacked separable convolutions plus an optional
    # shortcut (identity, or a 1x1 projection when shapes must change).
    repeat_number = 3
    channels = check_data(channels, repeat_number)
    filters = check_data(filters, repeat_number)
    strides = check_data(strides, repeat_number)
    data = input
    results = []
    for i in range(repeat_number):
        with scope('separable_conv' + str(i + 1)):
            if not activation_fn_in_separable_conv:
                # Pre-activation: ReLU applied before the separable conv.
                data = relu(data)
                data = separate_conv(
                    data,
                    channels[i],
                    strides[i],
                    filters[i],
                    dilation=dilation)
            else:
                # Activation applied inside the separable conv itself.
                data = separate_conv(
                    data,
                    channels[i],
                    strides[i],
                    filters[i],
                    dilation=dilation,
                    act=relu)
            results.append(data)
    if not has_skip:
        return data, results
    if skip_conv:
        # Project the shortcut with a 1x1 conv so its channels and stride
        # match the main path before the residual addition.
        param_attr = fluid.ParamAttr(
            name=name_scope + 'weights',
            regularizer=None,
            initializer=fluid.initializer.TruncatedNormal(loc=0.0, scale=0.09))
        with scope('shortcut'):
            skip = bn(
                conv(
                    input,
                    channels[-1],
                    1,
                    strides[-1],
                    groups=1,
                    padding=0,
                    param_attr=param_attr))
    else:
        skip = input
    return data + skip, results
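
# The block above relies on a `check_data` helper defined elsewhere in the
# repo. Inferring from the call sites, it broadcasts a scalar argument into a
# per-repeat list; the function below is an assumed sketch of that behavior,
# not the repo's actual implementation.
def _check_data_sketch(data, number):
    if isinstance(data, int):
        return [data] * number  # e.g. strides=1 -> [1, 1, 1]
    assert len(data) == number, 'expected one value per repetition'
    return data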

def net(self, x):
    # Three stacks of MobileNetV2-style inverted residual blocks; the first
    # two stacks downsample (stride 2), the third keeps resolution.
    x, _ = inverted_blocks(x, self.in_channels, self.t,
                           self.block_channels[0], self.num_blocks[0], 2,
                           'inverted_block_1')
    x, _ = inverted_blocks(x, self.block_channels[0], self.t,
                           self.block_channels[1], self.num_blocks[1], 2,
                           'inverted_block_2')
    x, _ = inverted_blocks(x, self.block_channels[1], self.t,
                           self.block_channels[2], self.num_blocks[2], 1,
                           'inverted_block_3')
    # Pyramid pooling over the deepest features, then a 1x1 projection.
    x = psp_module(x, self.block_channels[2] // 4)
    with scope('out'):
        x = relu(bn(conv(x, self.out_channels, 1)))
    return x
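
# For reference, the Fast-SCNN paper configures its global feature extractor
# roughly as below; whether this repo uses these exact defaults is an
# assumption, and the dict is illustrative only.
_example_extractor_config = dict(
    in_channels=64,                # output channels of learning_to_downsample
    block_channels=(64, 96, 128),  # per-stack inverted-residual widths
    t=6,                           # expansion factor inside each block
    num_blocks=(3, 3, 3),          # block repetitions per stack
    out_channels=128,              # channels after the final 1x1 conv
)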

def learning_to_downsample(x, dw_channels1=32, dw_channels2=48,
                           out_channels=64):
    # "Learning to downsample" module: one standard conv followed by two
    # depthwise-separable convs, each with stride 2.
    x = relu(bn(conv(x, dw_channels1, 3, 2)))
    with scope('dsconv1'):
        x = separate_conv(
            x, dw_channels2, stride=2, filter=3, act=fluid.layers.relu)
    with scope('dsconv2'):
        x = separate_conv(
            x, out_channels, stride=2, filter=3, act=fluid.layers.relu)
    return x
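
# Quick sanity sketch (illustration only, unused by the model): three
# stride-2 stages halve H and W each time, so the module emits features at
# 1/8 of the input resolution, assuming "same"-style padding. The input size
# below is a hypothetical Cityscapes-sized example.
def _downsample_shape_check():
    h, w = 1024, 2048
    for _stage in ('conv', 'dsconv1', 'dsconv2'):
        h, w = h // 2, w // 2
    return h, w  # (128, 256) -> overall 1/8 resolution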

def aux_layer(x, num_classes):
    # Auxiliary classification head: 3x3 conv, dropout, then a 1x1 conv
    # producing per-class logits. Dropout is active only during training.
    x = relu(bn(conv(x, 32, 3, padding=1)))
    x = dropout2d(x, 0.1, is_train=(cfg.PHASE == 'train'))
    with scope('logit'):
        x = conv(x, num_classes, 1, bias_attr=True)
    return x
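
# Heads like aux_layer are typically attached to intermediate features for
# deep supervision: the auxiliary logits are upsampled to label resolution
# and added to the loss with a small weight. The snippet below is a hedged
# usage sketch; `mid_feature`, `aux_loss`, the scope name, and the 0.4 weight
# are illustrative assumptions, not values taken from this file.
#
#   with scope('aux_head'):
#       aux_logit = aux_layer(mid_feature, num_classes)
#   aux_logit = fluid.layers.resize_bilinear(aux_logit, label_shape)
#   total_loss = main_loss + 0.4 * aux_loss(aux_logit, label)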