Example #1
 def forward(self, inputs: paddle.Tensor, ifshortcut: bool):
     y = self._expand_conv(inputs, if_act=True)
     y = self._bottleneck_conv(y, if_act=True)
     y = self._linear_conv(y, if_act=False)
     if ifshortcut:
         y = paddle.elementwise_add(inputs, y)
     return y
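
The shortcut branch above works because the add is a plain elementwise, broadcasting add over tensors of compatible shape. A minimal sketch of that behavior, written here with paddle.add (the Paddle 2.x spelling of elementwise_add):

import paddle

x = paddle.ones([2, 3, 4, 4])
y = paddle.ones([2, 3, 4, 4])
print(paddle.add(x, y).shape)  # [2, 3, 4, 4], same-shape residual add

# Broadcasting also applies: a [3, 1, 1] per-channel bias expands to match.
bias = paddle.ones([3, 1, 1])
print(paddle.add(x, bias).shape)  # [2, 3, 4, 4]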
Example #2
 def forward(self, inputs: paddle.Tensor):
     short = self._short(inputs)
     conv0 = F.relu(inputs)
     conv1 = self._conv_1(conv0)
     conv2 = F.relu(conv1)
     conv2 = self._conv_2(conv2)
     pool = self._pool(conv2)
     return paddle.elementwise_add(x=short, y=pool)
Example #3
 def forward(self, inputs: paddle.Tensor):
     conv0 = F.relu(inputs)
     conv0 = self._conv_0(conv0)
     conv1 = F.relu(conv0)
     conv1 = self._conv_1(conv1)
     conv2 = F.relu(conv1)
     conv2 = self._conv_2(conv2)
     return paddle.elementwise_add(x=inputs, y=conv2)
Example #4
 def forward(self, inputs: paddle.Tensor):
     x = self.expand_conv(inputs)
     x = self.bottleneck_conv(x)
     if self.if_se:
         x = self.mid_se(x)
     x = self.linear_conv(x)
     if self.if_shortcut:
         x = paddle.elementwise_add(inputs, x)
     return x
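
Example #4 gates the expanded features with self.mid_se before the linear projection, but the SE module itself is not shown. A typical squeeze-and-excitation layer, sketched here as a hypothetical SEModule in the Paddle 2.x API (the actual mid_se may differ):

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

class SEModule(nn.Layer):
    # Sketch of a squeeze-and-excitation block like the mid_se above.
    def __init__(self, channels, reduction=4):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2D(1)
        self.fc1 = nn.Conv2D(channels, channels // reduction, 1)
        self.fc2 = nn.Conv2D(channels // reduction, channels, 1)

    def forward(self, x):
        s = self.avg_pool(x)          # squeeze: global spatial average
        s = F.relu(self.fc1(s))
        s = F.hardsigmoid(self.fc2(s))
        return x * s                  # excite: per-channel reweighting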
Example #5
 def forward(self, inputs: paddle.Tensor):
     y = self.conv0(inputs)
     conv1 = self.conv1(y)

     if self.shortcut:
         short = inputs
     else:
         short = self.short(inputs)
     y = paddle.elementwise_add(x=short, y=conv1, act='relu')
     return y
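
The act='relu' argument fuses a ReLU into the add; that fused form belongs to the legacy elementwise_add op. In the Paddle 2.x API the same step is written as an explicit activation around the add, for example:

import paddle
import paddle.nn.functional as F

# Residual add followed by ReLU, the unfused Paddle 2.x equivalent of
# elementwise_add(x=short, y=conv1, act='relu').
short = paddle.rand([2, 8, 16, 16])
conv1 = paddle.rand([2, 8, 16, 16])
y = F.relu(paddle.add(short, conv1))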
Example #6
 def forward(self, inputs: paddle.Tensor):
     x = inputs
     if self.expand_ratio != 1:
         x = self._ecn(x)
         x = F.swish(x)
     x = self._dcn(x)
     x = F.swish(x)
     if self.has_se:
         x = self._se(x)
     x = self._pcn(x)
     if self.id_skip and \
             self.block_args.stride == 1 and \
             self.block_args.input_filters == self.block_args.output_filters:
         if self.drop_connect_rate:
             x = _drop_connect(x, self.drop_connect_rate, self.is_test)
         x = paddle.elementwise_add(x, inputs)
     return x
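
The _drop_connect helper used above is not shown. Drop connect (stochastic depth) zeroes whole examples in the batch during training and rescales the survivors; a sketch of a typical implementation under that assumption, matching the (inputs, prob, is_test) call signature:

import paddle

def _drop_connect(inputs, prob, is_test):
    # Sketch of a typical drop-connect helper; the original is not shown.
    if is_test or prob == 0.0:
        return inputs
    keep_prob = 1.0 - prob
    # One Bernoulli draw per example: floor(keep_prob + U[0,1)) is 1 with
    # probability keep_prob and 0 otherwise.
    random_tensor = keep_prob + paddle.rand([inputs.shape[0], 1, 1, 1])
    binary_mask = paddle.floor(random_tensor)
    return inputs / keep_prob * binary_mask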
Example #7
def column_parallel_linear(input,
                           in_size,
                           out_size,
                           use_bias=True,
                           gather_out=True,
                           mp_rank=0,
                           mp_nranks=1,
                           dtype="float32",
                           param_attr=None,
                           bias_attr=None,
                           param_name=None,
                           bias_name=None,
                           ring_id=0):
    assert out_size % mp_nranks == 0
    out_size_per_part = out_size // mp_nranks
    weight = paddle.create_parameter(shape=[in_size, out_size_per_part],
                                     dtype=dtype,
                                     name=param_name,
                                     attr=param_attr,
                                     is_bias=False)
    weight.is_distributed = True
    paddle.static.default_startup_program().global_block().vars[
        weight.name].is_distributed = True
    paddle.static.default_main_program().global_block().vars[
        weight.name].is_distributed = True
    if use_bias:
        bias = paddle.create_parameter(shape=[out_size_per_part],
                                       dtype=dtype,
                                       name=bias_name,
                                       attr=bias_attr,
                                       is_bias=True)
        bias.is_distributed = True
        paddle.static.default_startup_program().global_block().vars[
            bias.name].is_distributed = True
        paddle.static.default_main_program().global_block().vars[
            bias.name].is_distributed = True
    out = paddle.matmul(input, weight)
    if use_bias:
        out = paddle.elementwise_add(out, bias)
    if gather_out:
        output = []
        paddle.distributed.all_gather(output, out, group=ring_id)
        out = paddle.concat(output, axis=len(out.shape) - 1)
    return out
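
In this column-parallel scheme the weight matrix is partitioned along its output (column) dimension: each rank holds one [in_size, out_size // mp_nranks] shard, computes a slice of the output, and the all_gather plus concat at the end reassembles the full result. The arithmetic can be checked without any distributed setup; a small NumPy sketch:

import numpy as np

# Column parallelism: splitting W's columns across ranks and concatenating
# the per-rank partial outputs recovers the full X @ W.
rng = np.random.default_rng(0)
X = rng.standard_normal((4, 8)).astype("float32")   # [batch, in_size]
W = rng.standard_normal((8, 16)).astype("float32")  # [in_size, out_size]

mp_nranks = 2
shards = np.split(W, mp_nranks, axis=1)        # one [8, 8] shard per rank
partials = [X @ shard for shard in shards]     # each rank's local matmul
gathered = np.concatenate(partials, axis=-1)   # what all_gather + concat does

assert np.allclose(gathered, X @ W)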
Example #8
 def forward(self, inputs: paddle.Tensor) -> paddle.Tensor:
     conv1 = self.conv1(inputs)
     conv2 = self.conv2(conv1)
     out = paddle.elementwise_add(x=inputs, y=conv2, act=None)
     return out
Example #9
import numpy
import paddle

# Define placeholders for the input data
a = paddle.nn.data(name="a", shape=[1], dtype='int64')
b = paddle.nn.data(name="b", shape=[1], dtype='int64')
# Build the network (here it consists of a single op, elementwise_add)
result = paddle.elementwise_add(a, b)
# Prepare to run the network
cpu = paddle.CPUPlace()  # Pick the compute device; here we run on CPU
exe = paddle.Executor(cpu)  # Create the executor
# Create the input data
x = numpy.array([2])
y = numpy.array([3])
# Run the network
outs = exe.run(
    feed={'a': x, 'b': y},  # Feed the input data x, y to variables a and b
    fetch_list=[result]  # fetch_list names the variables whose results to fetch
)
# Print the result
print(outs)
# [array([5], dtype=int64)]
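
For reference, in Paddle 2.x the legacy elementwise_add op is typically written as paddle.add, and the same computation runs eagerly without an executor. A minimal dynamic-graph sketch of the session above:

import paddle

# Dynamic-graph equivalent of the static session above (Paddle 2.x).
a = paddle.to_tensor([2], dtype='int64')
b = paddle.to_tensor([3], dtype='int64')
print(paddle.add(a, b).numpy())  # [5]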
