def __init__(self, out_ch=3, bn=False, single=False, para_reduce=1):
    """Feed-forward regressor head: five conv stages followed by two linear layers.

    Args:
        out_ch: kept for interface parity with sibling heads (not read here).
        bn: enable batch norm inside each Rse_block.
        single: forwarded to every Rse_block.
        para_reduce: divisor applied to channel widths to shrink the model.
    """
    super(Regressor_ff, self).__init__()
    # DR=True stages add dropout; pool=False stages keep spatial resolution.
    # Widths at or below 64 are not reduced by para_reduce.
    stages = [
        Rse_block(512 // para_reduce, 256 // para_reduce, bn=bn, single=single, DR=True),
        Rse_block(256 // para_reduce, 128 // para_reduce, pool=False, bn=bn, single=single),
        Rse_block(128 // para_reduce, 64, bn=bn, single=single, DR=True),
        Rse_block(64, 32, pool=False, bn=bn, single=single),
        Rse_block(32, 16, bn=bn, single=single),
    ]
    # Linear head assumes a 16-channel 5x5 feature map — TODO confirm input size.
    self.f1 = nn.Linear(16 * 5 * 5, 256)
    self.f2 = nn.Linear(256, 4)
    self.clasifier = nn.Sequential(*stages)  # (sic) spelling kept: external attribute name
def __init__(self, in_ch=3, bn=False, para_reduce=1):
    """Four-stage encoder producing 512 // para_reduce output channels.

    Args:
        in_ch: number of input image channels.
        bn: enable batch norm in stages E1-E3.
        para_reduce: divisor shrinking every stage width.
    """
    super(Extractor, self).__init__()

    def width(c):
        # Every stage width shrinks by the same parameter-reduction factor.
        return c // para_reduce

    self.E1 = Rse_block(in_ch, width(32), pool=False, bn=bn)
    self.E2 = Rse_block(width(32), width(64), bn=bn)
    self.E3 = Rse_block(width(64), width(256), bn=bn)
    # NOTE(review): E4 is built without bn=bn, unlike E1-E3 — confirm intentional.
    self.E4 = Rse_block(width(256), width(512), last=True)
def __init__(self, out_ch=3, num_perm=2, bn=True, para_reduce=1):
    """Permutation classifier ("sorter"): three conv stages plus a 3-layer MLP.

    Args:
        out_ch: kept for interface parity (not read here).
        num_perm: number of permutation classes predicted by the last layer.
        bn: enable batch norm in the conv stages.
        para_reduce: divisor shrinking most channel widths.
    """
    super(Sorter, self).__init__()
    # Conv trunk narrows 512 -> 256 -> 128 -> 64 (64 is never reduced).
    self.E1 = Rse_block(512 // para_reduce, 256 // para_reduce, bn=bn)
    self.E2 = Rse_block(256 // para_reduce, 128 // para_reduce, bn=bn)
    self.E3 = Rse_block(128 // para_reduce, 64, bn=bn)
    # MLP head assumes a 64-channel 5x5 feature map — TODO confirm input size.
    self.f1 = nn.Linear(64 * 5 * 5, 512 // para_reduce)
    self.f2 = nn.Linear(512 // para_reduce, 256 // para_reduce)
    self.f3 = nn.Linear(256 // para_reduce, num_perm)
    self.num_perm = num_perm
def __init__(self, out_ch=3, num_perm=2, bn=True):
    """Permutation classifier variant without parameter reduction.

    Args:
        out_ch: kept for interface parity (not read here).
        num_perm: number of permutation classes predicted by the last layer.
        bn: enable batch norm in the conv stages.
    """
    super(Sorter, self).__init__()
    # Conv trunk narrows 512 -> 256 -> 128 -> 32.
    self.E1 = Rse_block(512, 256, bn=bn)
    self.E2 = Rse_block(256, 128, bn=bn)
    self.E3 = Rse_block(128, 32, bn=bn)
    # MLP head assumes a 32-channel 5x5 feature map — TODO confirm input size.
    self.f1 = nn.Linear(32 * 5 * 5, 512)
    self.f2 = nn.Linear(512, 256)
    self.f3 = nn.Linear(256, num_perm)
    self.num_perm = num_perm
def __init__(self, out_ch=3, bn=False):
    """Regressor head over concatenated (512 + 256)-channel features.

    Args:
        out_ch: kept for interface parity (not read here).
        bn: enable batch norm in each Rse_block.
    """
    super(Regressor, self).__init__()
    # Three single-conv stages collapsing 768 channels down to 8.
    stages = [
        Rse_block(512 + 256, 128, bn=bn, single=True),
        Rse_block(128, 64, bn=bn, single=True),
        Rse_block(64, 8, bn=bn, single=True),
    ]
    # Assumes an 8-channel 5x5 map feeding the linear layer — TODO confirm.
    self.f1 = nn.Linear(8 * 5 * 5, 4)
    self.clasifier = nn.Sequential(*stages)  # (sic) spelling kept: external attribute name
    self.num_perm = 2
def __init__(self, out_ch=3, bn=False, para_reduce=1):
    """Discriminator over fused (256 + 64)-channel features with a 2-way output.

    Args:
        out_ch: kept for interface parity (not read here).
        bn: enable batch norm in the Rse_blocks.
        para_reduce: divisor shrinking channel widths.
    """
    super(Discriminator, self).__init__()
    # Sequential classifier trunk: (256+64) -> 64 -> 64 -> 8 channels.
    stages = [
        Rse_block((256 + 64) // para_reduce, 64 // para_reduce, bn=bn, single=True),
        Rse_block(64 // para_reduce, 64 // para_reduce, bn=bn, single=True),
        Rse_block(64 // para_reduce, 8, bn=bn, single=True),
    ]
    # Auxiliary encoder stages kept outside the sequential classifier —
    # presumably applied separately in forward(); confirm against caller.
    self.E2 = Rse_block(32 // para_reduce, 32 // para_reduce, bn=bn)
    self.E3 = Rse_block((64 + 32) // para_reduce, 64 // para_reduce, bn=bn)
    # Linear head assumes an 8-channel 5x5 map; final layer emits 2 logits.
    self.f1 = nn.Linear(8 * 5 * 5, 128 // para_reduce)
    self.f2 = nn.Linear(128 // para_reduce, 2)
    self.clasifier = nn.Sequential(*stages)  # (sic) spelling kept: external attribute name
    self.num_perm = 2
def __init__(self, in_ch=3, out_ch=2):
    """U-Net-style encoder/decoder with shortcut blocks and a built-in optimizer.

    Args:
        in_ch: channels of the main input branch.
        out_ch: channels of the final 1x1 convolution output.
    """
    super(SUNET, self).__init__()
    self.in_ch = in_ch
    # Encoder; E1_iso is an alternate single-channel stem.
    self.E1 = Rse_block(in_ch, 64, pool=False)
    self.E1_iso = Rse_block(1, 64, pool=False)
    self.E2 = Rse_block(64, 128)
    self.E3 = Rse_block(128, 256)
    self.E4 = Rse_block(256, 512, last=True)
    # Decoder; input widths account for concatenated skip features.
    self.D3 = Rse_blockT(512 + 256, 256)
    self.D2 = Rse_blockT(256 + 128, 128)
    self.D1 = Rse_blockT(128 + 64, 64, last=True)
    self.conv_final = nn.Conv2d(64, out_ch, 1, 1)
    # Skip-connection refinement blocks at each encoder width.
    self.short1 = Short_cut_block(64, 64)
    self.short2 = Short_cut_block(128, 128)
    self.short3 = Short_cut_block(256, 256)
    # Training utilities owned by the model; Adam is created after all modules
    # above are registered so it captures every parameter.
    self.criterion = nn.MSELoss()
    self.opt_all = optim.Adam(self.parameters(), lr=0.001)
    self.scheduler = StepLR(self.opt_all, step_size=1, gamma=0.5)
    self.out_ch = out_ch
def __init__(self, in_ch=3, out_ch=2, atten=False):
    """SUNET variant fed by an external feature extractor.

    Args:
        in_ch: stored for callers; the encoder stem itself takes 16*16 channels.
        out_ch: channels of the final 1x1 convolution output.
        atten: accepted for interface parity (not read in this constructor).
    """
    super(SUNET, self).__init__()
    base_size = 64
    # Upstream feature extractor; presumably its output supplies the
    # 16*16-channel stem input — confirm against FeatExtractor.
    self.extractor = FeatExtractor(ch_in=1, batchNorm=True, base_size=base_size * 2)
    self.in_ch = in_ch
    # Encoder widths scale as base_size * {1, 2, 4, 8}.
    self.E1 = Rse_block(16 * 16, base_size, pool=False)
    self.E2 = Rse_block(base_size, base_size * 2)
    self.E3 = Rse_block(base_size * 2, base_size * 4)
    self.E4 = Rse_block(base_size * 4, base_size * 8, last=True)
    # Decoder; input widths account for concatenated skip features.
    self.D3 = Rse_blockT(base_size * 8 + base_size * 4, base_size * 4)
    self.D2 = Rse_blockT(base_size * 4 + base_size * 2, base_size * 2)
    self.D1 = Rse_blockT(base_size * 2 + base_size, base_size, last=True)
    # 1x1 compressors halving channel counts.
    self.comp1 = nn.Conv2d(base_size * 2, base_size, 1)
    self.comp2 = nn.Conv2d(base_size * 4, base_size * 2, 1)
    self.comp3 = nn.Conv2d(base_size * 8, base_size * 4, 1)
    self.conv_final = nn.Conv2d(base_size, out_ch, 1, 1)
    # Skip-connection refinement blocks at each encoder width.
    self.short1 = Short_cut_block(base_size, base_size)
    self.short2 = Short_cut_block(base_size * 2, base_size * 2)
    self.short3 = Short_cut_block(base_size * 4, base_size * 4)
    # Training utilities; Adam is created after all modules are registered.
    self.criterion = nn.MSELoss()
    self.opt_all = optim.Adam(self.parameters(), lr=0.001)
    self.scheduler = StepLR(self.opt_all, step_size=1, gamma=0.5)
    self.out_ch = out_ch
def __init__(self, in_ch=3):
    """Three-stage compensator encoder: in_ch -> 32 -> 64 -> 256 channels."""
    super(Compensator, self).__init__()
    # First stage keeps spatial resolution; later stages pool by default.
    self.E1 = Rse_block(in_ch, 32, pool=False)
    self.E2 = Rse_block(32, 64)
    self.E3 = Rse_block(64, 256)
def __init__(self, in_ch=3):
    """Four-stage encoder variant whose mid stages expect widened inputs."""
    super(Extractor, self).__init__()
    self.E1 = Rse_block(in_ch, 32, pool=False)
    # NOTE(review): E2/E3 input widths (64, 128) exceed the previous stage's
    # outputs (32, 64). Presumably forward() concatenates a second 32-/64-channel
    # stream (the Compensator stages match these widths) before each stage —
    # confirm before "fixing" the numbers.
    self.E2 = Rse_block(64, 64)
    self.E3 = Rse_block(128, 256)
    self.E4 = Rse_block(256, 512, last=True)
def __init__(self, in_ch=1):
    """Lightweight extractor: in_ch -> 16 -> 32 -> 128 -> 256 channels."""
    super(Extractor, self).__init__()
    # First stage keeps spatial resolution; E4 is flagged as the final stage.
    self.E1 = Rse_block(in_ch, 16, pool=False)
    self.E2 = Rse_block(16, 32)
    self.E3 = Rse_block(32, 128)
    self.E4 = Rse_block(128, 256, last=True)