import torch
import torch.nn as nn

# SynchronizedBatchNorm1d/3d, Flatten and conv3d_pad are project-local helpers
# (cross-GPU synchronized batch norm, a flatten module, a padded Conv3d wrapper).
# Placeholder imports (assumption) -- adjust to this repository's layout:
# from .sync_batchnorm import SynchronizedBatchNorm1d, SynchronizedBatchNorm3d
# from .basic import Flatten, conv3d_pad


class SELayer(nn.Module):
    """Squeeze-and-excitation layer over 3D feature maps: pool with an anisotropic
    window, squeeze channels through a 1x1x1 bottleneck, and upsample the
    attention map back to the input resolution."""

    def __init__(self, channel, channel_reduction=4, spatial_reduction=4, z_reduction=1):
        super(SELayer, self).__init__()
        self.pool_size = (z_reduction, spatial_reduction, spatial_reduction)
        self.se = nn.Sequential(
            nn.AvgPool3d(kernel_size=self.pool_size, stride=self.pool_size),
            nn.Conv3d(channel, channel // channel_reduction, kernel_size=1),
            SynchronizedBatchNorm3d(channel // channel_reduction),
            nn.ELU(inplace=True),
            nn.Conv3d(channel // channel_reduction, channel, kernel_size=1),
            SynchronizedBatchNorm3d(channel),
            nn.Sigmoid(),
            nn.Upsample(scale_factor=self.pool_size, mode='trilinear', align_corners=False),
        )
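# Sketch only: the forward() methods are not included in this excerpt. A typical
# squeeze-and-excitation forward gates the input with the attention map produced
# by ``self.se`` (assumption -- the authors' actual forward may differ).
def _selayer_forward_sketch(layer, x):
    # x: (N, C, D, H, W); layer.se(x) returns the same shape after the trilinear
    # upsample, provided D, H, W are divisible by the pooling factors.
    return x * layer.se(x)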
def conv3d_bn_elu(in_planes, out_planes, kernel_size=(3, 3, 3), stride=1,
                  dilation=(1, 1, 1), padding=(1, 1, 1), bias=False):
    return nn.Sequential(
        conv3d_pad(in_planes, out_planes, kernel_size, stride, dilation, padding, bias),
        SynchronizedBatchNorm3d(out_planes),
        nn.ELU(inplace=True))
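# Usage sketch (assumption: conv3d_pad is the repo's padded Conv3d helper, so the
# default 3x3x3 kernel with padding (1, 1, 1) preserves spatial size):
#     block = conv3d_bn_elu(in_planes=1, out_planes=32)
#     y = block(torch.randn(2, 1, 8, 64, 64))   # expected shape: (2, 32, 8, 64, 64)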
class SELayer_cs(nn.Module):
    """Channel-and-spatial squeeze-and-excitation layer: a bottleneck MLP channel
    gate plus a downsample/convolve/upsample spatial gate."""

    def __init__(self, channel, reduction=4):
        super(SELayer_cs, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool3d(1)
        # Channel gate: global average pooling followed by a bottleneck MLP.
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            SynchronizedBatchNorm1d(channel // reduction),
            nn.ELU(inplace=True),
            nn.Linear(channel // reduction, channel),
            SynchronizedBatchNorm1d(channel),
            nn.Sigmoid())
        # Spatial gate: collapse channels, pool 8x in-plane, convolve, upsample back.
        self.sc = nn.Sequential(
            nn.Conv3d(channel, 1, kernel_size=(1, 1, 1)),
            SynchronizedBatchNorm3d(1),
            nn.ELU(inplace=True),
            nn.MaxPool3d(kernel_size=(1, 8, 8), stride=(1, 8, 8)),
            conv3d_bn_elu(1, 1, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.Upsample(scale_factor=(1, 8, 8), mode='trilinear', align_corners=False),
            nn.Conv3d(1, channel, kernel_size=(1, 1, 1)),
            SynchronizedBatchNorm3d(channel),
            nn.Sigmoid())
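# Sketch only: a common way to combine the two gates of SELayer_cs (assumption --
# the authors' actual forward may differ). ``fc`` yields per-channel weights from
# globally pooled features; ``sc`` yields a full-resolution spatial gate.
def _selayer_cs_forward_sketch(layer, x):
    n, c = x.size(0), x.size(1)
    w_channel = layer.fc(layer.avg_pool(x).view(n, c)).view(n, c, 1, 1, 1)
    w_spatial = layer.sc(x)  # H and W must be divisible by 8 for the pool/upsample pair
    return x * w_channel * w_spatial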
class _E(nn.Module):
    """Encoder: five strided 3D conv blocks followed by two 4x4x4 heads that
    predict the mean and variance of the latent code."""

    def __init__(self, num_filter=32, in_num=1, latent_dim=512, bias=True, training=True):
        super(_E, self).__init__()
        self.latent_dim = latent_dim
        self.training = training
        # Anisotropic downsampling: the first two blocks only reduce in-plane (H, W).
        self.layer1 = nn.Sequential(
            nn.Conv3d(in_num, num_filter, kernel_size=(1, 4, 4), stride=(1, 2, 2),
                      bias=bias, padding=(0, 1, 1)),
            SynchronizedBatchNorm3d(num_filter),
            nn.ELU(inplace=True))
        self.layer2 = nn.Sequential(
            nn.Conv3d(num_filter, num_filter * 2, kernel_size=(1, 4, 4), stride=(1, 2, 2),
                      bias=bias, padding=(0, 1, 1)),
            SynchronizedBatchNorm3d(num_filter * 2),
            nn.ELU(inplace=True))
        self.layer3 = nn.Sequential(
            nn.Conv3d(num_filter * 2, num_filter * 4, kernel_size=(3, 4, 4), stride=(1, 2, 2),
                      bias=bias, padding=(1, 1, 1)),
            SynchronizedBatchNorm3d(num_filter * 4),
            nn.ELU(inplace=True))
        self.layer4 = nn.Sequential(
            nn.Conv3d(num_filter * 4, num_filter * 8, kernel_size=(3, 4, 4), stride=(1, 2, 2),
                      bias=bias, padding=(1, 1, 1)),
            SynchronizedBatchNorm3d(num_filter * 8),
            nn.ELU(inplace=True))
        self.layer5 = nn.Sequential(
            nn.Conv3d(num_filter * 8, num_filter * 16, kernel_size=4, stride=2,
                      bias=bias, padding=1),
            SynchronizedBatchNorm3d(num_filter * 16),
            nn.ELU(inplace=True))
        # Predict both mean and variance of the latent distribution.
        self.f_conv1 = nn.Sequential(
            nn.Conv3d(num_filter * 16, self.latent_dim, kernel_size=4, stride=1,
                      bias=bias, padding=0),
            Flatten())
        self.f_conv2 = nn.Sequential(
            nn.Conv3d(num_filter * 16, self.latent_dim, kernel_size=4, stride=1,
                      bias=bias, padding=0),
            Flatten())

        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
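# Sketch only: a VAE-style pass through _E (assumption -- the authors' actual
# forward may differ). f_conv1/f_conv2 are read here as the latent mean and
# log-variance heads, combined with the usual reparameterization trick. With the
# defaults, an input of (N, in_num, 8, 128, 128) reaches a 4x4x4 map before the
# 4x4x4 latent heads, so each head returns an (N, latent_dim) vector.
def _encoder_forward_sketch(encoder, x):
    h = encoder.layer1(x)
    h = encoder.layer2(h)
    h = encoder.layer3(h)
    h = encoder.layer4(h)
    h = encoder.layer5(h)
    mu, log_var = encoder.f_conv1(h), encoder.f_conv2(h)
    if encoder.training:
        z = mu + torch.exp(0.5 * log_var) * torch.randn_like(mu)
    else:
        z = mu
    return z, mu, log_var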
class _G(nn.Module):
    """Generator/decoder: mirrors _E with six transposed-conv blocks that map a
    (latent_dim, 1, 1, 1) code back to a single-channel volume."""

    def __init__(self, num_filter=32, out_num=1, latent_dim=512, bias=True):
        super(_G, self).__init__()
        self.latent_dim = latent_dim
        self.layer1 = nn.Sequential(
            nn.ConvTranspose3d(self.latent_dim, num_filter * 16, kernel_size=4, stride=1,
                               bias=bias, padding=0),
            SynchronizedBatchNorm3d(num_filter * 16),
            nn.ELU(inplace=True))
        self.layer2 = nn.Sequential(
            nn.ConvTranspose3d(num_filter * 16, num_filter * 8, kernel_size=4, stride=2,
                               bias=bias, padding=1),
            SynchronizedBatchNorm3d(num_filter * 8),
            nn.ELU(inplace=True))
        self.layer3 = nn.Sequential(
            nn.ConvTranspose3d(num_filter * 8, num_filter * 4, kernel_size=(3, 4, 4),
                               stride=(1, 2, 2), bias=bias, padding=(1, 1, 1)),
            SynchronizedBatchNorm3d(num_filter * 4),
            nn.ELU(inplace=True))
        self.layer4 = nn.Sequential(
            nn.ConvTranspose3d(num_filter * 4, num_filter * 2, kernel_size=(3, 4, 4),
                               stride=(1, 2, 2), bias=bias, padding=(1, 1, 1)),
            SynchronizedBatchNorm3d(num_filter * 2),
            nn.ELU(inplace=True))
        self.layer5 = nn.Sequential(
            nn.ConvTranspose3d(num_filter * 2, num_filter, kernel_size=(1, 4, 4),
                               stride=(1, 2, 2), bias=bias, padding=(0, 1, 1)),
            SynchronizedBatchNorm3d(num_filter),
            nn.ELU(inplace=True))
        self.layer6 = nn.Sequential(
            nn.ConvTranspose3d(num_filter, out_num, kernel_size=(1, 4, 4),
                               stride=(1, 2, 2), bias=bias, padding=(0, 1, 1))
            # SynchronizedBatchNorm3d(out_num)
        )

        for m in self.modules():
            if isinstance(m, nn.ConvTranspose3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, SynchronizedBatchNorm3d):
                m.weight.data.fill_(1.0)
                m.bias.data.zero_()
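# Sketch only: decoding a latent code with _G (assumption -- the authors' actual
# forward may differ). With the default strides, an input of shape
# (N, latent_dim, 1, 1, 1) grows to (N, out_num, 8, 128, 128): depth 1 -> 4 -> 8,
# in-plane 1 -> 4 -> 8 -> 16 -> 32 -> 64 -> 128, matching the encoder's input size.
def _generator_forward_sketch(generator, z):
    h = z.view(z.size(0), generator.latent_dim, 1, 1, 1)
    for stage in (generator.layer1, generator.layer2, generator.layer3,
                  generator.layer4, generator.layer5, generator.layer6):
        h = stage(h)
    return h  # raw output; the final batch norm is commented out in the original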