def preresnet_bottleneck(l, ch_out, stride, preact):
    """Pre-activation bottleneck residual block (1x1 -> 3x3 -> 1x1).

    The stride is applied on the second conv, following fb.resnet.torch.

    NOTE(review): an identical redefinition of this function appears later in
    the file and shadows this one — confirm which copy is intended to be kept.

    Args:
        l: input tensor.
        ch_out: bottleneck channel count; the block outputs ``ch_out * 4``.
        stride: stride for the middle 3x3 conv and for the shortcut.
        preact: pre-activation mode passed through to ``apply_preactivation``.
    """
    x, identity = apply_preactivation(l, preact)
    x = Conv2D('conv1', x, ch_out, 1, activation=BNReLU)
    x = Grconv('conv2', x, ch_out, 3, strides=stride, activation=BNReLU)
    x = Conv2D('conv3', x, ch_out * 4, 1)
    # Residual add: the shortcut is projected to ch_out * 4 channels (and the
    # same spatial size) so the two branches are addable.
    return x + resnet_shortcut(identity, ch_out * 4, stride)
def preresnet_bottleneck(l, ch_out, stride, preact):
    """Pre-activation bottleneck residual block (1x1 -> 3x3 -> 1x1).

    Args:
        l: input tensor.
        ch_out: bottleneck channel count; the block outputs ``ch_out * 4``.
        stride: applied on the second conv, following fb.resnet.torch.
        preact: pre-activation mode passed to ``apply_preactivation``.
    """
    # stride is applied on the second conv, following fb.resnet.torch
    l, shortcut = apply_preactivation(l, preact)  # pre-activate first
    l = Conv2D('conv1', l, ch_out, 1, activation=BNReLU)
    l = Grconv('conv2', l, ch_out, 3, strides=stride, activation=BNReLU)
    l = Conv2D('conv3', l, ch_out * 4, 1)
    # l is F(x), shortcut is x.
    # l and shortcut can only be added when their spatial dims and channel
    # counts match, so the shortcut is projected to ch_out * 4 channels.
    return l + resnet_shortcut(
        shortcut, ch_out * 4, stride)
def resnet_bottleneck(l, ch_out, stride, stride_first=False):
    """Post-activation bottleneck residual block (1x1 -> 3x3 -> 1x1).

    stride_first: original resnet put stride on first conv.
    fb.resnet.torch put stride on second conv.

    Args:
        l: input tensor.
        ch_out: bottleneck channel count; the block outputs ``ch_out * 4``.
        stride: stride for the block (placement controlled by stride_first).
        stride_first: if True, stride goes on conv1, else on conv2.
    """
    identity = l
    # Decide once where the stride is applied.
    s1, s2 = (stride, 1) if stride_first else (1, stride)
    x = Conv2D('conv1', l, ch_out, 1, strides=s1, activation=BNReLU)
    x = Grconv('conv2', x, ch_out, 3, strides=s2, activation=BNReLU)
    # Zero-init the last BN's gamma so the block starts as (near) identity.
    x = Conv2D('conv3', x, ch_out * 4, 1, activation=get_bn(zero_init=True))
    return x + resnet_shortcut(
        identity, ch_out * 4, stride, activation=get_bn(zero_init=False))
def preresnet_basicblock(l, ch_out, stride, preact):
    """Pre-activation basic residual block (two 3x3 convs).

    Args:
        l: input tensor.
        ch_out: output channel count of both convs.
        stride: stride for the first conv and for the shortcut.
        preact: pre-activation mode passed to ``apply_preactivation``.
    """
    # Pre-activate before the convs; also returns the un-activated shortcut.
    x, identity = apply_preactivation(l, preact)
    x = Grconv('conv1', x, ch_out, 3, strides=stride, activation=BNReLU)
    x = Grconv('conv2', x, ch_out, 3)
    # Project the shortcut to ch_out channels so the residual add is valid.
    return x + resnet_shortcut(identity, ch_out, stride)
def resnet_basicblock(l, ch_out, stride):
    """Post-activation basic residual block (two 3x3 convs).

    Args:
        l: input tensor.
        ch_out: output channel count of both convs.
        stride: stride for the first conv and for the shortcut.
    """
    identity = l
    x = Grconv('conv1', l, ch_out, 3, strides=stride, activation=BNReLU)
    # Zero-init the last BN's gamma so the block starts as (near) identity.
    x = Grconv('conv2', x, ch_out, 3, activation=get_bn(zero_init=True))
    return x + resnet_shortcut(
        identity, ch_out, stride, activation=get_bn(zero_init=False))