示例#1
0
    def __init__(self, in_ch):
        """Build the encoder: a 3x3 stem conv followed by three Conv2dBlock stages.

        Args:
            in_ch (int): number of channels of the input feature map.
        """
        super(Encoder, self).__init__()
        # 3x3 stem conv mapping the input to 64 channels (stride 1, padding 1).
        self.cnn = nn.Conv2d(in_ch, 64, 3, 1, 1, bias=True)
        # First_block=True makes the first stage accept the 64-channel stem
        # output; the later stages consume 128 channels (see Conv2dBlock).
        # Booleans used here to match Conv2dBlock's declared bool parameter.
        self.net = nn.Sequential(
            self.cnn,
            Conv2dBlock(First_block=True),
            Conv2dBlock(First_block=False),
            Conv2dBlock(First_block=False),
        )
        # Only the stem conv needs explicit init here; each Conv2dBlock
        # initializes its own layers in its constructor.
        init_weights(self.cnn, init_type='kaiming')
示例#2
0
    def __init__(self, in_ch=384, AdapNorm=True, AdapNorm_attention_flag='1layer', model_initial='kaiming'):
        """Three Conv_block stages plus pooling and a 2-way classifier head.

        Args:
            in_ch (int): channel count of the input features.
            AdapNorm (bool): forwarded to each Conv_block (combine BN and IN when True).
            AdapNorm_attention_flag (str): forwarded to each Conv_block ('1layer' or '2layer').
            model_initial (str): weight-initialization scheme name (e.g. 'kaiming').
        """
        super(FeatEmbedder, self).__init__()
        self.AdapNorm = AdapNorm
        self.AdapNorm_attention_flag = AdapNorm_attention_flag
        self.model_initial = model_initial

        # Conv stages in_ch -> 128 -> 256 -> 512, all sharing the same
        # normalization / initialization configuration.
        shared_cfg = (self.AdapNorm, self.AdapNorm_attention_flag, self.model_initial)
        self.conv_block1 = Conv_block(in_ch, 128, *shared_cfg)
        self.conv_block2 = Conv_block(128, 256, *shared_cfg)
        self.conv_block3 = Conv_block(256, 512, *shared_cfg)

        self.max_pool = nn.MaxPool2d(2)
        # Collapse the spatial dimensions to 1x1 before the linear head.
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512, 2)  # two-class output head

        # The Conv_blocks initialize themselves; only the FC head needs it here.
        init_weights(self.fc, init_type=self.model_initial)
示例#3
0
    def __init__(self, in_ch, out_ch, AdapNorm=False, AdapNorm_attention_flag=False, model_initial='kaiming'):
        """3x3 conv stage with optional adaptive BatchNorm/InstanceNorm mixing.

        Args:
            in_ch (int): channel count of the input features.
            out_ch (int): channel count of the output features.
            AdapNorm (bool): True combines BN and IN with a learned balance
                factor; False uses plain BN only.
            AdapNorm_attention_flag (str or None): '1layer' / '2layer' selects a
                one- or two-layer FC that produces the balance factor; an
                explicit None falls back to free per-channel gate parameters.
            model_initial (str): weight-initialization scheme (e.g. 'kaiming').
        """
        super(Conv_block, self).__init__()
        self.AdapNorm = AdapNorm
        self.AdapNorm_attention_flag = AdapNorm_attention_flag
        self.model_initial = model_initial

        self.conv = conv3x3(in_ch, out_ch)

        if not self.AdapNorm:
            # Plain path: BatchNorm only, no InstanceNorm branch.
            self.IN = None
            self.BN = nn.BatchNorm2d(out_ch)
        else:
            # Adaptive path: keep both norms plus a pooled descriptor feeding
            # the gate that balances them.
            self.BN = nn.BatchNorm2d(out_ch)
            self.IN = nn.InstanceNorm2d(out_ch, affine=True)
            self.global_pool = nn.AdaptiveAvgPool2d(1)

            if self.AdapNorm_attention_flag is None:
                # No attention net: free per-channel gate weight/bias, started
                # at an even 0.5 / 0.0 split between the two norms.
                self.gate_weight = torch.nn.Parameter(torch.Tensor(out_ch))
                self.gate_bias = torch.nn.Parameter(torch.Tensor(out_ch))
                self.gate_weight.data.fill_(0.5)
                self.gate_bias.data.fill_(0)
            else:
                # Attention-based gate: one or two FC layers, then a sigmoid.
                if self.AdapNorm_attention_flag == '1layer':
                    self.AttentionNet = nn.Linear(out_ch, out_ch)
                elif self.AdapNorm_attention_flag == '2layer':
                    # Bottleneck width out_ch//16 + 1 (the +1 keeps it nonzero).
                    self.AttentionNet_fc1 = nn.Linear(out_ch, out_ch // 16 + 1)
                    self.AttentionNet_fc2 = nn.Linear(out_ch // 16 + 1, out_ch)
                self.sigmoid = nn.Sigmoid()

        self.relu = nn.ReLU(inplace=True)

        # model initial
        init_weights(self.conv, init_type=self.model_initial)
示例#4
0
    def __init__(self, in_ch):
        """Auxiliary head: four conv + BN + ReLU stages reducing to 1 channel.

        Args:
            in_ch (int): channel count of the input features.
        """
        super(Auxilary_Deep, self).__init__()
        self.in_ch = in_ch

        # Channel schedule in_ch -> 256 -> 128 -> 64 -> 1; every conv is
        # 3x3 / stride 1 / padding 1, so spatial size is preserved.
        layers = []
        widths = (self.in_ch, 256, 128, 64, 1)
        for c_in, c_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1))
            layers.append(nn.BatchNorm2d(c_out))
            layers.append(nn.ReLU())

        self.model = nn.Sequential(*layers)
        init_weights(self.model, init_type='kaiming')
示例#5
0
    def __init__(self, First_block=False):
        """Stage of three 3x3 conv + ELU + BatchNorm layers, then a 2x2 avg-pool.

        Args:
            First_block (bool): True for the first stage, whose input carries
                64 channels; later stages receive 128-channel input.
        """
        super(Conv2dBlock, self).__init__()
        in_width = 64 if First_block else 128
        layers = [
            nn.Conv2d(in_width, 128, 3, 1, 1, bias=True),
            nn.ELU(),
            nn.BatchNorm2d(128),

            nn.Conv2d(128, 196, 3, 1, 1, bias=True),
            nn.ELU(),
            nn.BatchNorm2d(196),

            nn.Conv2d(196, 128, 3, 1, 1, bias=True),
            nn.ELU(),
            nn.BatchNorm2d(128),

            # Halve the spatial resolution at the end of the stage.
            nn.AvgPool2d(2, stride=2),
        ]
        self.net = nn.Sequential(*layers)
        init_weights(self.net, init_type='kaiming')
示例#6
0
    def __init__(self, in_ch):
        """Similarity feature extractor: three conv + BN + ReLU stages, each
        followed by a 2x2 average pool (8x spatial downsampling overall).

        Args:
            in_ch (int): channel count of the input features.
        """
        super(Similarity, self).__init__()
        self.in_ch = in_ch

        # Channel schedule: in_ch -> 128 -> 64 -> 64, one pool per stage.
        layers = []
        for c_in, c_out in ((self.in_ch, 128), (128, 64), (64, 64)):
            layers.append(nn.Conv2d(c_in, c_out, kernel_size=3, stride=1, padding=1))
            layers.append(nn.BatchNorm2d(c_out))
            layers.append(nn.ReLU())
            layers.append(nn.AvgPool2d(kernel_size=2, stride=2))

        self.model = nn.Sequential(*layers)
        init_weights(self.model, init_type='kaiming')