def forward(self, x):
    """Two-branch reconstruction head.

    A shallow feature map feeds (a) a deep residual branch refined by the
    ``ures`` stack and (b) an image branch refined by the ``uim`` stack;
    the two projections are summed into the high-resolution output.
    Returns a single-element list ``[HR]``.
    """
    shallow = self.relu(self.conv_input(x))
    feat = self.conv_input2(shallow)
    # Three fixed refinement stages applied in sequence.
    for stage in (self.convt_F11, self.convt_F12, self.convt_F13):
        feat = stage(feat)
    # Fold the remaining stacks over their seeds (apply is the file's
    # accumulator helper; semantics preserved verbatim).
    residual_feat = reduce(apply, self.ures, feat)
    image_feat = reduce(apply, self.uim, shallow)
    HR = self.convt_R1(residual_feat) + self.convt_I1(image_feat)
    return [HR]
def forward(self, x):
    """Feature extraction -> recursive blocks -> non-local fusion -> upsample.

    The (feature-block, scale-conv) pairs are folded over a paired state
    seeded with the shallow features; only the first state component is
    kept as the deep feature map.
    """
    shallow = self.relu(self.conv_input(x))
    deep = reduce(forwardPath, zip(self.convt_F, self.s_conv), (shallow, shallow))[0]
    fused = shallow + self.non_local(deep)
    # upsample
    upsampled = self.u(fused)
    return self.convt_shape1(upsampled)
def forward(self, x):
    """Fold the (down2, branches, scales) triples over ``x``.

    At each step the running feature is downsampled by ``down``, while
    ``scale(branch(feature))`` (computed on the pre-downsample feature)
    is accumulated into a running sum. Returns the final
    ``(feature, accumulated_sum)`` pair.
    """
    feature, accumulated = x, 0
    for down, branch, scale in zip(self.down2, self.branches, self.scales):
        accumulated = scale(branch(feature)) + accumulated
        feature = down(feature)
    return (feature, accumulated)
# NOTE(review): truncated fragment — the statement below is the tail of a
# weight-initialisation routine whose beginning lies outside this chunk;
# confirm its enclosing loop/condition against the full file.
m.bias.data.zero_()

# Factory of factories: genUpsampleBlock(r) yields a builder mapping
# (in_channels, out_channels) to conv3x3 -> PixelShuffle(r) -> PReLU.
# Conv3x3 is looked up lazily at call time, so its later definition is fine.
genUpsampleBlock = lambda r: lambda in_channels, out_channels:\
    nn.Sequential(Conv3x3(in_channels, out_channels, bias=True), nn.PixelShuffle(r), nn.PReLU())
upsample_block = genUpsampleBlock(2)   # builder for 2x pixel-shuffle blocks
upsample_block3 = genUpsampleBlock(3)  # builder for 3x pixel-shuffle blocks

# 3x3 convolution with padding 1 (preserves spatial size at stride 1).
Conv3x3 = lambda channelIn, channelOut, stride=1, bias=False:\
    nn.Conv2d(in_channels=channelIn, out_channels=channelOut, kernel_size=3, stride=stride, padding=1, bias=bias)

# residual(f, x, u): add the learned correction f(x) onto the base u.
residual = lambda f, x, u: u + f(x)
# appendApply(a, f): extend list a with f applied to its last element —
# folding this collects every intermediate output of a module chain.
appendApply = lambda a, f: a + [f(a[-1])]
# multiConvt: in training mode, one residual output per recursive stage
# (the [1:] drops the seed x itself); in eval mode, only the final stage.
multiConvt = lambda model, convt_R1, x, u:\
    [residual(convt_R1, y, u) for y in reduce(appendApply, model.convt_F, [x])[1:]]\
    if model.training else [residual(convt_R1, reduce(apply, model.convt_F, x), u)]

# Build an nn.Sequential from explicit (name, module) pairs.
namedSequential = lambda *args: nn.Sequential(OrderedDict(args))

isModule = lambda m: isinstance(m, nn.Module)
# Register m on model under `name` if it is a Module; otherwise do nothing.
_addModule = lambda model, name, m: model.add_module(name, m) if isModule(m) else None
# addModule(model)((i, item)): item may be a (name, module) tuple or a bare
# module — bare modules are registered under their positional index i.
addModule = lambda model: lambda t: _addModule(model, *((t[1][0], t[1][1]) if type(t[1]) is tuple else (str(t[0]), t[1])))
# Register every entry of ms on model (tuple() forces the lazy map).
addModules = lambda model, ms: tuple(map(addModule(model), enumerate(ms)))

# eF: unwrap the module from a (name, module) tuple; pass through otherwise.
eF = lambda t: t[1] if type(t) is tuple else t
extractFuncs = lambda args: map(eF, args)

def toModule(f):
    # NOTE(review): definition appears truncated here — no return statement
    # is visible, so the class-building tail presumably continues past this
    # chunk; confirm against the full file.
    def C(self, *fs):
        # NOTE(review): super(self.__class__, self) recurses infinitely if C
        # is ever subclassed — flagged for review, left unchanged here.
        super(self.__class__, self).__init__()
        addModules(self, fs)  # register submodules so parameters are tracked
        self.f = f(*fs)       # composed forward callable built from fs