Example No. 1
    def __init__(self, p=1, t=2, r=1):
        """
		:param p: the number of pre-processing Residual Units before splitting into trunk branch and mask branch
		:param t: the number of Residual Units in trunk branch
		:param r: the number of Residual Units between adjacent pooling layer in the mask branch
		"""
        self.p = p
        self.t = t
        self.r = r

        self.residual_block = ResidualBlock()
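
This constructor fragment only records the three hyperparameters; the full class, including its f_prop method, appears in Example No. 16. A one-line construction sketch (the defaults match the signature above):

    # Sketch: instantiate with the default hyperparameters p=1, t=2, r=1.
    attention_module = AttentionModule(p=1, t=2, r=1)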
Example No. 2
    def __init__(self):

        super(ResidualAttentionModel, self).__init__()

        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(32), nn.ReLU(inplace=True))
        self.rb1 = ResidualBlock(32, 1)
        self.mpool1 = nn.MaxPool2d(kernel_size=2)
        self.features = nn.Sequential(AttentionModule_stg0(32, 32))
        self.classifier = nn.Sequential(  # dimension reduction
            CRResidualBlock(32, 8, (4, 16)), CRResidualBlock(8, 4, (8, 32)),
            CRResidualBlock(4, 2, (16, 64)), CRResidualBlock(2, 1, (32, 128)))
        self.mpool2 = nn.Sequential(  # dimension reduction
            nn.BatchNorm2d(1), nn.ReLU(inplace=True),
            nn.AvgPool2d(kernel_size=(3, 20), stride=2))
        self.fc = nn.Linear(189, 1)

        ## Weights initialization
        def _weights_init(m):
            classname = m.__class__.__name__
            if classname.find('Conv') != -1:
                xavier_normal_(m.weight)
            elif classname.find('Linear') != -1:
                xavier_normal_(m.weight)
                m.bias.data.zero_()
            elif classname.find('BatchNorm') != -1:
                m.weight.data.fill_(1)
                m.bias.data.zero_()

        self.apply(_weights_init)
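
The example stops at the constructor. A plausible forward() sketch, applying the layers in declaration order (the flatten-to-189 step is inferred from self.fc = nn.Linear(189, 1); xavier_normal_ in _weights_init presumably comes from torch.nn.init):

    # Hypothetical forward() -- a sketch only; the example shows just __init__.
    def forward(self, x):
        out = self.conv1(x)              # 1 -> 32 channels
        out = self.rb1(out)
        out = self.mpool1(out)
        out = self.features(out)         # AttentionModule_stg0
        out = self.classifier(out)       # channel reduction 32 -> 1
        out = self.mpool2(out)
        out = out.view(out.size(0), -1)  # flatten; assumed to yield 189 features
        return self.fc(out)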
Example No. 3
 def __init__(self, num_classes):
     super(ResidualAttentionModel_448input, self).__init__()
     self.conv1 = nn.Sequential(
         nn.Conv2D(3,
                   64,
                   kernel_size=7,
                   stride=2,
                   padding=3,
                   bias_attr=False), nn.BatchNorm2D(64), nn.ReLU())
     self.mpool1 = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)
     # tbq add
     # 112*112
     self.residual_block0 = ResidualBlock(64, 128)
     self.attention_module0 = AttentionModule_stage0(128, 128)
     # tbq add end
     self.residual_block1 = ResidualBlock(128, 256, 2)
     # 56*56
     self.attention_module1 = AttentionModule_stage1(256, 256)
     self.residual_block2 = ResidualBlock(256, 512, 2)
     self.attention_module2 = AttentionModule_stage2(512, 512)
     self.attention_module2_2 = AttentionModule_stage2(512, 512)  # tbq add
     self.residual_block3 = ResidualBlock(512, 1024, 2)
     self.attention_module3 = AttentionModule_stage3(1024, 1024)
     self.attention_module3_2 = AttentionModule_stage3(1024,
                                                       1024)  # tbq add
     self.attention_module3_3 = AttentionModule_stage3(1024,
                                                       1024)  # tbq add
     self.residual_block4 = ResidualBlock(1024, 2048, 2)
     self.residual_block5 = ResidualBlock(2048, 2048)
     self.residual_block6 = ResidualBlock(2048, 2048)
     self.mpool2 = nn.Sequential(nn.BatchNorm2D(2048), nn.ReLU(),
                                 nn.AvgPool2D(kernel_size=7, stride=1))
     self.fc = nn.Linear(2048, num_classes)
Example No. 4
 def __init__(self):
     super(ResidualAttentionModel_448input, self).__init__()
     self.conv1 = nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
         nn.BatchNorm2d(64),
         nn.ReLU(inplace=True)
     )
     self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
     # tbq add
     # 112*112
     self.residual_block0 = ResidualBlock(64, 128)
     self.attention_module0 = AttentionModule_stage0(128, 128)
     # tbq add end
     self.residual_block1 = ResidualBlock(128, 256, 2)
     # 56*56
     self.attention_module1 = AttentionModule_stage1(256, 256)
     self.residual_block2 = ResidualBlock(256, 512, 2)
     self.attention_module2 = AttentionModule_stage2(512, 512)
     self.attention_module2_2 = AttentionModule_stage2(512, 512)  # tbq add
     self.residual_block3 = ResidualBlock(512, 1024, 2)
     self.attention_module3 = AttentionModule_stage3(1024, 1024)
     self.attention_module3_2 = AttentionModule_stage3(1024, 1024)  # tbq add
     self.attention_module3_3 = AttentionModule_stage3(1024, 1024)  # tbq add
     self.residual_block4 = ResidualBlock(1024, 2048, 2)
     self.residual_block5 = ResidualBlock(2048, 2048)
     self.residual_block6 = ResidualBlock(2048, 2048)
     self.mpool2 = nn.Sequential(
         nn.BatchNorm2d(2048),
         nn.ReLU(inplace=True),
         nn.AvgPool2d(kernel_size=7, stride=1)
     )
     # 14 outputs because there are 14 classes
     self.fc = nn.Linear(2048, 14)
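
Examples No. 3 and No. 4 define the same 448-input architecture in PaddlePaddle and PyTorch respectively; both stop at the constructor. A hypothetical forward() for the PyTorch variant, chaining the modules in declaration order (spatial sizes follow from the strides):

    # Sketch only -- the wiring is inferred from the declaration order above.
    def forward(self, x):                                # x: 3 x 448 x 448
        out = self.mpool1(self.conv1(x))                 # 112 x 112
        out = self.attention_module0(self.residual_block0(out))
        out = self.attention_module1(self.residual_block1(out))   # 56 x 56
        out = self.residual_block2(out)                  # 28 x 28
        out = self.attention_module2_2(self.attention_module2(out))
        out = self.residual_block3(out)                  # 14 x 14
        out = self.attention_module3(out)
        out = self.attention_module3_3(self.attention_module3_2(out))
        out = self.residual_block4(out)                  # 7 x 7
        out = self.residual_block6(self.residual_block5(out))
        out = self.mpool2(out)                           # 1 x 1 x 2048
        out = out.view(out.size(0), -1)
        return self.fc(out)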
Example No. 5
 def __init__(self, class_num):
     super(ResidualAttentionModel, self).__init__()
     self.conv1 = nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
         nn.BatchNorm2d(64),
         nn.ReLU(inplace=True)
     )
     self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
     self.residual_block1 = ResidualBlock(64, 128)
     self.attention_module1 = AttentionModule(128, 128, (112, 112), (56, 56), (28, 28))
     self.residual_block2 = ResidualBlock(128, 256, 2)
     self.attention_module2 = AttentionModule(256, 256, (56, 56), (28, 28), (14, 14))
     self.residual_block3 = ResidualBlock(256, 512, 2)
     self.attention_module3 = AttentionModule(512, 512, (28, 28), (14, 14), (7, 7))
     self.residual_block4 = ResidualBlock(512, 1024, 2)
     self.residual_block4_2 = ResidualBlock(1024, 2048, 2)
     self.residual_block5 = ResidualBlock(2048, 2048)
     self.residual_block6 = ResidualBlock(2048, 2048)
     self.mpool2 = nn.Sequential(
         nn.BatchNorm2d(2048),
         nn.ReLU(inplace=True),
         nn.AvgPool2d(kernel_size=7, stride=1)
     )
     self.fc = nn.Linear(2048, class_num)
     self.classifier = ClassBlock(2048, class_num)
Example No. 6
    def __init__(self,
                 in_channels,
                 out_channels,
                 size1=(28, 28),
                 size2=(14, 14)):
        super(AttentionModule_stage2, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)

        self.trunk_branches = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.mpool1 = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)

        self.softmax1_blocks = ResidualBlock(in_channels, out_channels)

        self.skip1_connection_residual_block = ResidualBlock(
            in_channels, out_channels)

        self.mpool2 = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)

        self.softmax2_blocks = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.interpolation2 = nn.UpsamplingBilinear2D(size=size2)

        self.softmax3_blocks = ResidualBlock(in_channels, out_channels)

        self.interpolation1 = nn.UpsamplingBilinear2D(size=size1)

        self.softmax4_blocks = nn.Sequential(
            nn.BatchNorm2D(out_channels), nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.BatchNorm2D(out_channels),
            nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.Sigmoid())

        self.last_blocks = ResidualBlock(in_channels, out_channels)
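
Example No. 6 lists the layers of the stage-2 module but not how they connect. A sketch of the usual wiring (the skip-connection placement is an assumption based on the standard implementation of this module): the mask branch downsamples twice, upsamples back while adding the skip, and the resulting mask modulates the trunk via (1 + mask) * trunk.

    # Hypothetical forward for AttentionModule_stage2 -- standard wiring, not
    # taken from the example itself.
    def forward(self, x):
        x = self.first_residual_blocks(x)
        out_trunk = self.trunk_branches(x)                     # trunk branch
        # soft mask branch, bottom-up: downsample twice, keep one skip
        out_softmax1 = self.softmax1_blocks(self.mpool1(x))    # 28x28 -> 14x14
        out_skip1 = self.skip1_connection_residual_block(out_softmax1)
        out_softmax2 = self.softmax2_blocks(self.mpool2(out_softmax1))  # 7x7
        # top-down: upsample back, adding the skip connection on the way
        out = self.interpolation2(out_softmax2) + out_softmax1 + out_skip1
        out_softmax3 = self.softmax3_blocks(out)
        out_interp1 = self.interpolation1(out_softmax3) + out_trunk    # 28x28
        mask = self.softmax4_blocks(out_interp1)               # values in (0, 1)
        out = (1 + mask) * out_trunk                           # attention residual learning
        return self.last_blocks(out)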
Example No. 7
 def __init__(self, num_classes):
     super(ResidualAttentionModel_92_32input_update, self).__init__()
     self.conv1 = nn.Sequential(
         nn.Conv2D(3,
                   32,
                   kernel_size=3,
                   stride=1,
                   padding=1,
                   bias_attr=False), nn.BatchNorm2D(32), nn.ReLU())  # 32*32
     self.residual_block1 = ResidualBlock(32, 128)  # 32*32
     self.attention_module1 = AttentionModule_stage1_cifar(
         128, 128, size1=(32, 32), size2=(16, 16))  # 32*32
     self.residual_block2 = ResidualBlock(128, 256, 2)  # 16*16
     self.attention_module2 = AttentionModule_stage2_cifar(
         256, 256, size=(16, 16))  # 16*16
     self.attention_module2_2 = AttentionModule_stage2_cifar(
         256, 256, size=(16, 16))  # 16*16 # tbq add
     self.residual_block3 = ResidualBlock(256, 512, 2)  # 8*8
     self.attention_module3 = AttentionModule_stage3_cifar(512, 512)  # 8*8
     self.attention_module3_2 = AttentionModule_stage3_cifar(
         512, 512)  # 8*8 # tbq add
     self.attention_module3_3 = AttentionModule_stage3_cifar(
         512, 512)  # 8*8 # tbq add
     self.residual_block4 = ResidualBlock(512, 1024)  # 8*8
     self.residual_block5 = ResidualBlock(1024, 1024)  # 8*8
     self.residual_block6 = ResidualBlock(1024, 1024)  # 8*8
     self.mpool2 = nn.Sequential(nn.BatchNorm2D(1024), nn.ReLU(),
                                 nn.AvgPool2D(kernel_size=8))
     self.fc = nn.Linear(1024, num_classes)
Example No. 8
    def __init__(self, in_channels, out_channels, size=(8, 8)):
        super(AttentionModule_stage3_cifar, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)

        self.trunk_branches = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.middle_2r_blocks = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.conv1_1_blocks = nn.Sequential(
            nn.BatchNorm2D(out_channels), nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.BatchNorm2D(out_channels),
            nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.Sigmoid())

        self.last_blocks = ResidualBlock(in_channels, out_channels)
Example No. 9
    def __init__(self, in_channels, out_channels, size=(8, 8)):
        super(AttentionModule_stage2_cifar, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)

        self.trunk_branches = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.mpool1 = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)  # 4*4

        self.middle_2r_blocks = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.interpolation1 = nn.UpsamplingBilinear2D(size=size)  # 8*8

        self.conv1_1_blocks = nn.Sequential(
            nn.BatchNorm2D(out_channels), nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.BatchNorm2D(out_channels),
            nn.ReLU(),
            nn.Conv2D(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias_attr=False), nn.Sigmoid())

        self.last_blocks = ResidualBlock(in_channels, out_channels)
Example No. 10
 def __init__(self):
     super(ResidualAttentionModel_92_32input, self).__init__()
     self.conv1 = nn.Sequential(
         nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2, bias=False),
         nn.BatchNorm2d(32), nn.ReLU(inplace=True))  # 32*32
     self.mpool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)  # 16*16
     self.residual_block1 = ResidualBlock(32, 128)  # 16*16
     self.attention_module1 = AttentionModule_stage1_cifar(128,
                                                           128)  # 16*16
     self.residual_block2 = ResidualBlock(128, 256, 2)  # 8*8
     self.attention_module2 = AttentionModule_stage2_cifar(256, 256)  # 8*8
     self.attention_module2_2 = AttentionModule_stage2_cifar(
         256, 256)  # 8*8 # tbq add
     self.residual_block3 = ResidualBlock(256, 512, 2)  # 4*4
     self.attention_module3 = AttentionModule_stage3_cifar(512, 512)  # 4*4
     self.attention_module3_2 = AttentionModule_stage3_cifar(
         512, 512)  # 4*4 # tbq add
     self.attention_module3_3 = AttentionModule_stage3_cifar(
         512, 512)  # 4*4 # tbq add
     self.residual_block4 = ResidualBlock(512, 1024)  # 4*4
     self.residual_block5 = ResidualBlock(1024, 1024)  # 4*4
     self.residual_block6 = ResidualBlock(1024, 1024)  # 4*4
     self.mpool2 = nn.Sequential(nn.BatchNorm2d(1024),
                                 nn.ReLU(inplace=True),
                                 nn.AvgPool2d(kernel_size=4, stride=1))
     self.fc = nn.Linear(1024, 10)
Example No. 11
 def __init__(self):
     super(ResidualAttentionModel_92, self).__init__()
     self.conv1 = nn.Sequential(
         nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
         nn.BatchNorm2d(64),
         nn.ReLU(inplace=True)
     )
     self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
     self.residual_block1 = ResidualBlock(64, 256)
     self.attention_module1 = AttentionModule_stage1(256, 256)
     self.residual_block2 = ResidualBlock(256, 512, 2)
     self.attention_module2 = AttentionModule_stage2(512, 512)
     self.attention_module2_2 = AttentionModule_stage2(512, 512)  # tbq add
     self.residual_block3 = ResidualBlock(512, 1024, 2)
     self.attention_module3 = AttentionModule_stage3(1024, 1024)
     self.attention_module3_2 = AttentionModule_stage3(1024, 1024)  # tbq add
     self.attention_module3_3 = AttentionModule_stage3(1024, 1024)  # tbq add
     self.residual_block4 = ResidualBlock(1024, 2048, 2)
     self.residual_block5 = ResidualBlock(2048, 2048)
     self.residual_block6 = ResidualBlock(2048, 2048)
     self.mpool2 = nn.Sequential(
         nn.BatchNorm2d(2048),
         nn.ReLU(inplace=True),
         nn.AvgPool2d(kernel_size=7, stride=1)
     )
     self.fc = nn.Linear(2048, 14)
     self.sigmoid_end = nn.Sigmoid()
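
Example No. 11 ends with a stand-alone nn.Sigmoid after a 14-way fc, which suggests a multi-label head: one independent probability per class rather than a softmax over classes. A sketch of how the tail of its forward() would presumably look:

    # Hypothetical tail of forward() -- the example shows only __init__.
    out = self.mpool2(out)
    out = out.view(out.size(0), -1)   # flatten to 2048 features
    out = self.fc(out)                # 14 logits
    return self.sigmoid_end(out)      # independent per-class probabilities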
Example No. 12
    def __init__(self, in_channels, out_channels, size1=(14, 14)):
        super(AttentionModule_stage3, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)

        self.trunk_branches = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.softmax1_blocks = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.interpolation1 = nn.UpsamplingBilinear2d(size=size1)

        self.softmax2_blocks = nn.Sequential(
            nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True),
            nn.Conv2d(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias=False), nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias=False), nn.Sigmoid())

        self.last_blocks = ResidualBlock(in_channels, out_channels)
Example No. 13
    def __init__(self, in_channels, out_channels, size1=(16, 16),
                 size2=(8, 8)):
        super(AttentionModule_stage1_cifar, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)

        self.trunk_branches = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # 8*8

        self.down_residual_blocks1 = ResidualBlock(in_channels, out_channels)

        self.skip1_connection_residual_block = ResidualBlock(
            in_channels, out_channels)

        self.mpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # 4*4

        self.middle_2r_blocks = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.interpolation1 = nn.UpsamplingBilinear2d(size=size2)  # 8*8

        self.up_residual_blocks1 = ResidualBlock(in_channels, out_channels)

        self.interpolation2 = nn.UpsamplingBilinear2d(size=size1)  # 16*16

        self.conv1_1_blocks = nn.Sequential(
            nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True),
            nn.Conv2d(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias=False), nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias=False), nn.Sigmoid())

        self.last_blocks = ResidualBlock(in_channels, out_channels)
Example No. 14
    def __init__(self,
                 in_channels,
                 out_channels,
                 size1=(128, 545),
                 size2=(120, 529),
                 size3=(104, 497),
                 size4=(72, 186),
                 l1weight=0.2):

        super(AttentionModule_stg0, self).__init__()
        self.l1weight = l1weight
        self.pre = ResidualBlock(in_channels, 1)

        ## trunk branch
        self.trunk = nn.Sequential(ResidualBlock(in_channels, 1),
                                   ResidualBlock(in_channels, 1))
        ## softmax branch: bottom-up
        self.mp1 = nn.MaxPool2d(kernel_size=3, stride=(1, 1))
        self.sm1 = ResidualBlock(in_channels, (4, 8))
        self.skip1 = ResidualBlock(in_channels, 1)

        self.mp2 = nn.MaxPool2d(kernel_size=3, stride=(1, 1))
        self.sm2 = ResidualBlock(in_channels, (8, 16))
        self.skip2 = ResidualBlock(in_channels, 1)

        self.mp3 = nn.MaxPool2d(kernel_size=3, stride=(1, 2))
        self.sm3 = ResidualBlock(in_channels, (16, 32))
        self.skip3 = ResidualBlock(in_channels, 1)

        self.mp4 = nn.MaxPool2d(kernel_size=3, stride=(2, 2))
        self.sm4 = nn.Sequential(ResidualBlock(in_channels, (16, 32)),
                                 ResidualBlock(in_channels, 1))
        ## softmax branch: top-down
        self.up4 = nn.UpsamplingBilinear2d(size=size4)
        self.sm5 = ResidualBlock(in_channels, 1)
        self.up3 = nn.UpsamplingBilinear2d(size=size3)
        self.sm6 = ResidualBlock(in_channels, 1)
        self.up2 = nn.UpsamplingBilinear2d(size=size2)
        self.sm7 = ResidualBlock(in_channels, 1)
        self.up1 = nn.UpsamplingBilinear2d(size=size1)
        # 1*1 convolution blocks
        self.conv1 = nn.Sequential(
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels,
                      in_channels,
                      kernel_size=1,
                      stride=1,
                      bias=False),
            nn.BatchNorm2d(in_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels,
                      in_channels,
                      kernel_size=1,
                      stride=1,
                      bias=False),
            nn.Softmax2d())  # Softmax2d here in place of the usual Sigmoid mask activation

        self.post = ResidualBlock(in_channels, 1)
Example No. 15
    def __init__(self,
                 in_channels,
                 out_channels,
                 size1=(112, 112),
                 size2=(56, 56),
                 size3=(28, 28),
                 size4=(14, 14)):
        super(AttentionModule_stage0, self).__init__()
        self.first_residual_blocks = ResidualBlock(in_channels, out_channels)

        self.trunk_branches = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))

        self.mpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # 56*56
        self.softmax1_blocks = ResidualBlock(in_channels, out_channels)

        self.skip1_connection_residual_block = ResidualBlock(
            in_channels, out_channels)

        self.mpool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # 28*28
        self.softmax2_blocks = ResidualBlock(in_channels, out_channels)

        self.skip2_connection_residual_block = ResidualBlock(
            in_channels, out_channels)

        self.mpool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # 14*14
        self.softmax3_blocks = ResidualBlock(in_channels, out_channels)
        self.skip3_connection_residual_block = ResidualBlock(
            in_channels, out_channels)
        self.mpool4 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # 7*7
        self.softmax4_blocks = nn.Sequential(
            ResidualBlock(in_channels, out_channels),
            ResidualBlock(in_channels, out_channels))
        self.interpolation4 = nn.UpsamplingBilinear2d(size=size4)
        self.softmax5_blocks = ResidualBlock(in_channels, out_channels)
        self.interpolation3 = nn.UpsamplingBilinear2d(size=size3)
        self.softmax6_blocks = ResidualBlock(in_channels, out_channels)
        self.interpolation2 = nn.UpsamplingBilinear2d(size=size2)
        self.softmax7_blocks = ResidualBlock(in_channels, out_channels)
        self.interpolation1 = nn.UpsamplingBilinear2d(size=size1)

        self.softmax8_blocks = nn.Sequential(
            nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True),
            nn.Conv2d(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias=False), nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels,
                      out_channels,
                      kernel_size=1,
                      stride=1,
                      bias=False), nn.Sigmoid())

        self.last_blocks = ResidualBlock(in_channels, out_channels)
Example No. 16
class AttentionModule(object):
    """AttentionModuleClass"""
    def __init__(self, p=1, t=2, r=1):
        """
		:param p: the number of pre-processing Residual Units before splitting into trunk branch and mask branch
		:param t: the number of Residual Units in trunk branch
		:param r: the number of Residual Units between adjacent pooling layer in the mask branch
		"""
        self.p = p
        self.t = t
        self.r = r

        self.residual_block = ResidualBlock()

    def f_prop(self,
               input,
               input_channels,
               scope="attention_module",
               is_training=True):
        """
		f_prop function of attention module
		:param input: A Tensor. input data [batch_size, height, width, channel]
		:param input_channels: dimension of input channel.
		:param scope: str, tensorflow name scope
		:param is_training: boolean, whether training step or not(test step)
		:return: A Tensor [batch_size, height, width, channel]
		"""
        with tf.variable_scope(scope):

            # residual blocks (TODO: change this function)
            with tf.variable_scope("first_residual_blocks"):
                for i in range(self.p):
                    input = self.residual_block.f_prop(
                        input,
                        input_channels,
                        scope="num_blocks_{}".format(i),
                        is_training=is_training)

            with tf.variable_scope("trunk_branch"):
                output_trunk = input
                for i in range(self.t):
                    output_trunk = self.residual_block.f_prop(
                        output_trunk,
                        input_channels,
                        scope="num_blocks_{}".format(i),
                        is_training=is_training)

            with tf.variable_scope("soft_mask_branch"):

                with tf.variable_scope("down_sampling_1"):
                    # max pooling
                    filter_ = [1, 2, 2, 1]
                    output_soft_mask = tf.nn.max_pool(input,
                                                      ksize=filter_,
                                                      strides=filter_,
                                                      padding='SAME')

                    for i in range(self.r):
                        output_soft_mask = self.residual_block.f_prop(
                            output_soft_mask,
                            input_channels,
                            scope="num_blocks_{}".format(i),
                            is_training=is_training)

                with tf.variable_scope("skip_connection"):
                    # TODO: define new blocks
                    output_skip_connection = self.residual_block.f_prop(
                        output_soft_mask,
                        input_channels,
                        is_training=is_training)

                with tf.variable_scope("down_sampling_2"):
                    # max pooling
                    filter_ = [1, 2, 2, 1]
                    output_soft_mask = tf.nn.max_pool(output_soft_mask,
                                                      ksize=filter_,
                                                      strides=filter_,
                                                      padding='SAME')

                    for i in range(self.r):
                        output_soft_mask = self.residual_block.f_prop(
                            output_soft_mask,
                            input_channels,
                            scope="num_blocks_{}".format(i),
                            is_training=is_training)

                with tf.variable_scope("up_sampling_1"):
                    for i in range(self.r):
                        output_soft_mask = self.residual_block.f_prop(
                            output_soft_mask,
                            input_channels,
                            scope="num_blocks_{}".format(i),
                            is_training=is_training)

                    # interpolation
                    output_soft_mask = UpSampling2D([2, 2])(output_soft_mask)

                # add skip connection
                output_soft_mask += output_skip_connection

                with tf.variable_scope("up_sampling_2"):
                    for i in range(self.r):
                        output_soft_mask = self.residual_block.f_prop(
                            output_soft_mask,
                            input_channels,
                            scope="num_blocks_{}".format(i),
                            is_training=is_training)

                    # interpolation
                    output_soft_mask = UpSampling2D([2, 2])(output_soft_mask)

                with tf.variable_scope("output"):
                    output_soft_mask = tf.layers.conv2d(output_soft_mask,
                                                        filters=input_channels,
                                                        kernel_size=1)
                    output_soft_mask = tf.layers.conv2d(output_soft_mask,
                                                        filters=input_channels,
                                                        kernel_size=1)

                    # sigmoid
                    output_soft_mask = tf.nn.sigmoid(output_soft_mask)

            with tf.variable_scope("attention"):
                output = (1 + output_soft_mask) * output_trunk

            with tf.variable_scope("last_residual_blocks"):
                for i in range(self.p):
                    output = self.residual_block.f_prop(
                        output,
                        input_channels,
                        scope="num_blocks_{}".format(i),
                        is_training=is_training)

            return output
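
A minimal invocation sketch for the class above (TF 1.x style; the placeholder shape is chosen purely for illustration):

    import tensorflow as tf

    # Hypothetical driver code -- shapes are an assumption.
    x = tf.placeholder(tf.float32, [None, 32, 32, 64])  # NHWC input
    attention = AttentionModule(p=1, t=2, r=1)
    y = attention.f_prop(x, input_channels=64,
                         scope="attention_module_1", is_training=True)
    # y keeps the input shape: (1 + soft_mask) * trunk preserves H, W, C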