def __init__(self):
    super(VoxResNet_ResBlock, self).__init__()
    self.seq = nn.Sequential(
        BatchNorm3d(64), ReLU(),
        Conv3d(64, 64, (1, 3, 3), padding=(0, 1, 1)),
        BatchNorm3d(64), ReLU(),
        Conv3d(64, 64, (3, 3, 3), padding=1))
def __init__(self, config):
    super(VoxResNet, self).__init__()
    self.seq1 = nn.Sequential(
        Conv3d(1, 32, 3, padding=1),
        BatchNorm3d(32), ReLU(),
        Conv3d(32, 32, (1, 3, 3), padding=(0, 1, 1)))
    self.seq2 = nn.Sequential(
        BatchNorm3d(32), ReLU(),
        Conv3d(32, 64, 3, padding=1, stride=2),
        # MaxPool3d(2),
        VoxResNet_ResBlock(),
        VoxResNet_ResBlock())
    self.seq3 = nn.Sequential(
        BatchNorm3d(64), ReLU(),
        Conv3d(64, 64, 3, padding=1, stride=2),
        # MaxPool3d(2, padding=(1, 0, 0)),
        VoxResNet_ResBlock(),
        VoxResNet_ResBlock())
    self.seq4 = nn.Sequential(
        BatchNorm3d(64), ReLU(),
        Conv3d(64, 64, 3, padding=1, stride=2),
        # MaxPool3d(2, padding=(1, 0, 0)),
        VoxResNet_ResBlock(),
        VoxResNet_ResBlock())
    """
    # For Leiden dataset, 16 slices
    self.transposed1 = ConvTranspose3d(32, 2, 3, padding=1)
    self.transposed2 = ConvTranspose3d(64, 2, 3, stride=2, padding=1, output_padding=1)
    self.transposed3 = ConvTranspose3d(64, 2, 3, stride=4, padding=1, output_padding=3)
    self.transposed4 = ConvTranspose3d(64, 2, 3, stride=8, padding=1, output_padding=7)
    """
    # For CR dataset, 18 slices
    self.transposed1 = ConvTranspose3d(32, 2, 3, padding=1)
    self.transposed2 = ConvTranspose3d(64, 2, 3, stride=2, padding=1, output_padding=1)
    self.transposed3 = ConvTranspose3d(64, 2, 3, stride=4, padding=1, output_padding=(1, 3, 3))
    self.transposed4 = ConvTranspose3d(64, 2, 3, stride=8, padding=1, output_padding=(1, 7, 7))
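# A minimal forward sketch for the VoxResNet above (not shown in the source):
# the four stages feed the four transposed convolutions, which upsample each
# scale back to the input resolution; summing the four 2-channel maps follows
# the deep-supervision fusion of the original VoxResNet design. Attribute
# names match the __init__ above; the fusion-by-sum is an assumption.
def forward(self, x):
    h1 = self.seq1(x)   # full resolution, 32 channels
    h2 = self.seq2(h1)  # 1/2 resolution
    h3 = self.seq3(h2)  # 1/4 resolution
    h4 = self.seq4(h3)  # 1/8 resolution
    c1 = self.transposed1(h1)
    c2 = self.transposed2(h2)
    c3 = self.transposed3(h3)
    c4 = self.transposed4(h4)
    return c1 + c2 + c3 + c4  # summed auxiliary classifiers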
def __init__(self, inp_feat, out_feat, kernel=3, stride=1, padding=1, residual=None):
    super(Conv3D_Block, self).__init__()
    self.conv1 = Sequential(
        Conv3d(inp_feat, out_feat, kernel_size=kernel, stride=stride, padding=padding, bias=True),
        BatchNorm3d(out_feat),
        ReLU())
    self.conv2 = Sequential(
        Conv3d(out_feat, out_feat, kernel_size=kernel, stride=stride, padding=padding, bias=True),
        BatchNorm3d(out_feat),
        ReLU())
    self.residual = residual
    if self.residual is not None:
        self.residual_upsampler = Conv3d(inp_feat, out_feat, kernel_size=1, bias=False)
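# Hedged sketch of the matching forward pass (assumed; the source only shows
# __init__): two Conv-BN-ReLU stages, with the 1x1x1 projection added back
# when `residual` is enabled.
def forward(self, x):
    res = x
    out = self.conv2(self.conv1(x))
    if self.residual is not None:
        out = out + self.residual_upsampler(res)
    return out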
def __init__(self, filters):
    super(UNet3D_ConvBlock, self).__init__()
    self.seq = nn.Sequential(
        Conv3d(filters[0], filters[1], 3, padding=1),
        BatchNorm3d(filters[1]), ReLU(),
        Conv3d(filters[1], filters[2], 3, padding=1),
        BatchNorm3d(filters[2]), ReLU(),
    )
def __init__(self, in_filters):
    super(ResNet, self).__init__()
    self.seq = nn.Sequential(
        ReLU(), BatchNorm3d(in_filters),
        Conv3d(in_filters, in_filters, 3, padding=1),
        ReLU(), BatchNorm3d(in_filters),
        Conv3d(in_filters, in_filters, 3, padding=1)
    )
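# Assumed companion forward for the residual block above: the identity is
# added to the ReLU-BN-Conv branch.
def forward(self, x):
    return x + self.seq(x)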
def __init__(self, channels=32, refine=False):
    super(SGABlock, self).__init__()
    self.refine = refine
    if self.refine:
        self.bn_relu = nn.Sequential(BatchNorm3d(channels), nn.ReLU(inplace=True))
        self.conv_refine = BasicConv(channels, channels, is_3d=True, kernel_size=3, padding=1, relu=False)
        # self.conv_refine1 = BasicConv(8, 8, is_3d=True, kernel_size=1, padding=1)
    else:
        self.bn = BatchNorm3d(channels)
    self.SGA = SGA()
    self.relu = nn.ReLU(inplace=True)
def __init__(self, in_filters, concat, growth_rate, dim_reduc=False,
             nonlinearity=torch.nn.functional.relu):
    super(RatLesNet_ResNetBlock, self).__init__()
    self.seq = nn.Sequential(
        ReLU(), BatchNorm3d(in_filters),
        Conv3d(in_filters, in_filters, 3, padding=1),
        ReLU(), BatchNorm3d(in_filters),
        Conv3d(in_filters, in_filters, 3, padding=1))
def __init__(self, channels, kernel_size):
    super(ResidualBlock, self).__init__()
    self.conv1 = ConvLayer(channels, channels, kernel_size=kernel_size, stride=1)
    self.in1 = BatchNorm3d(channels)
    self.conv2 = ConvLayer(channels, channels, kernel_size=kernel_size, stride=1)
    self.in2 = BatchNorm3d(channels)
    self.relu = ReLU()
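# A plausible forward for this block (assumption; the source only shows
# __init__), following the usual style-transfer residual pattern:
# Conv-BN-ReLU, Conv-BN, then the identity shortcut.
def forward(self, x):
    residual = x
    out = self.relu(self.in1(self.conv1(x)))
    out = self.in2(self.conv2(out))
    return out + residual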
def __init__(self, in_channels, out_channels, pv):
    super(ResidualBlock, self).__init__()
    self.res_branch2a = ConvTTN3d(in_channels=in_channels, out_channels=out_channels,
                                  kernel_size=3, padding=1, project_variable=pv, bias=False)
    self.bn_branch2a = BatchNorm3d(out_channels)
    # The second conv consumes the first branch's output, so its input channel
    # count must be out_channels; the original passed in_channels, which only
    # works when the two happen to be equal.
    self.res_branch2b = ConvTTN3d(in_channels=out_channels, out_channels=out_channels,
                                  kernel_size=3, padding=1, project_variable=pv, bias=False)
    self.bn_branch2b = BatchNorm3d(out_channels)
def __init__(self, in_filters, out_filters):
    super(Bottleneck, self).__init__()
    self.seq = nn.Sequential(
        ReLU(), BatchNorm3d(in_filters),
        Conv3d(in_filters, out_filters, 1)
    )
def __init__(self, in_channels):
    super(CANet, self).__init__()
    inter_channels = in_channels // 2
    self.conv5a = nn.Sequential(
        nn.Conv3d(in_channels, inter_channels, 3, padding=1, bias=False),
        BatchNorm3d(inter_channels), nn.ReLU())
    self.conv5c = nn.Sequential(
        nn.Conv3d(in_channels, inter_channels, 3, padding=1, bias=False),
        BatchNorm3d(inter_channels), nn.ReLU())
    self.gcn = nn.Sequential(OrderedDict(
        [("FeatureInteractionGraph%02d" % i,
          FeatureInteractionGraph(inter_channels, 30, kernel=1)) for i in range(1)]))
    self.dcn = nn.Sequential(OrderedDict(
        [("ConvContextBranch%02d" % i, ConvContextBranch()) for i in range(1)]))
    self.crffusion_1 = CGACRF(inter_channels, inter_channels, inter_channels)
    self.crffusion_2 = CGACRF(inter_channels, inter_channels, inter_channels)
    self.crffusion_3 = CGACRF(inter_channels, inter_channels, inter_channels)
    self.crffusion_4 = CGACRF(inter_channels, inter_channels, inter_channels)
    self.crffusion_5 = CGACRF(inter_channels, inter_channels, inter_channels)
    self.conv51 = normal_conv_blocks(inter_channels, inter_channels)
    self.conv52 = normal_conv_blocks(inter_channels, inter_channels)
    self.upconv1 = normal_conv_blocks(240, 120)
    self.upconv2 = normal_conv_blocks(120, 60)
    self.upconv3 = normal_conv_blocks(120, 60)
    self.upconv4 = normal_conv_blocks(60, 30)
    self.upconv5 = normal_conv_blocks(60, 30)
    self.upconv6 = normal_conv_blocks(30, 30)
    self.final_conv = nn.Conv3d(30, 3, kernel_size=1, bias=True)
    self.upsample = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=True)
def __init__(self):
    super(ConvFrontend, self).__init__()
    self.conv = Conv3d(1, 64, (5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3))
    self.norm = BatchNorm3d(64)
    self.pool = MaxPool3d((1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
def __init__(self, inplanes, outplanes, downsample=None, stride=1):
    super(BasicBlock3D, self).__init__()
    self.conv1 = Conv3d(inplanes, outplanes, kernel_size=(1, 3, 3), stride=stride,
                        padding=(0, 1, 1), bias=True)
    self.bn1 = BatchNorm3d(outplanes)
    self.relu = ReLU(inplace=True)
    self.conv2 = Conv3d(outplanes, outplanes, kernel_size=(1, 3, 3), stride=stride,
                        padding=(0, 1, 1), bias=True)
    self.bn2 = BatchNorm3d(outplanes)
    self.downsample = downsample
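# Assumed forward for BasicBlock3D, following the standard torchvision
# BasicBlock pattern: `downsample`, when given, projects the identity to match
# `outplanes`. Note that both convs above take `stride`, so for stride != 1
# the downsample path must match the combined stride.
def forward(self, x):
    identity = x
    out = self.relu(self.bn1(self.conv1(x)))
    out = self.bn2(self.conv2(out))
    if self.downsample is not None:
        identity = self.downsample(x)
    return self.relu(out + identity)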
def __init__(self, inp_feat, out_feat, kernel=4, stride=2, padding=1):
    super(Deconv3D_Block, self).__init__()
    self.deconv = Sequential(
        ConvTranspose3d(inp_feat, out_feat, kernel_size=kernel, stride=stride,
                        padding=padding, output_padding=0, bias=True),
        BatchNorm3d(out_feat),
        ReLU())
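# Shape sanity check (illustrative, not from the source): for ConvTranspose3d
# the output size is (in - 1) * stride - 2 * padding + kernel + output_padding,
# so the defaults kernel=4, stride=2, padding=1 give (in - 1) * 2 - 2 + 4
# = 2 * in: the block exactly doubles D, H and W.
import torch
deconv = ConvTranspose3d(16, 8, kernel_size=4, stride=2, padding=1, output_padding=0)
print(deconv(torch.randn(1, 16, 9, 24, 24)).shape)  # torch.Size([1, 8, 18, 48, 48])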
def __init__(self, in_channels, out_channels, pv):
    super(ConvolutionBlock, self).__init__()
    self.conv = ConvTTN3d(in_channels=in_channels, out_channels=out_channels,
                          kernel_size=7, stride=2, padding=3, project_variable=pv,
                          bias=False)
    self.bn_conv = BatchNorm3d(out_channels)
def __init__(self, in_filters):
    super(RatLesNetv2_DenseNet, self).__init__()
    self.seq = []
    self.seq.append(nn.Sequential(
        ReLU(), BatchNorm3d(in_filters),
        Conv3d(in_filters, in_filters, 3, padding=1),
    ))
    self.seq.append(nn.Sequential(
        ReLU(), BatchNorm3d(in_filters * 2),
        Conv3d(in_filters * 2, in_filters, 3, padding=1),
    ))
    # Compression
    self.seq.append(nn.Sequential(
        BatchNorm3d(in_filters * 3),
        Conv3d(in_filters * 3, in_filters, 1),
    ))
    self.seq = nn.ModuleList(self.seq)
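# Assumed forward for the dense block above, inferred from the channel counts
# (in, 2*in, 3*in): each stage consumes the concatenation of all previous
# feature maps, and the final 1x1x1 convolution compresses back to in_filters.
def forward(self, x):
    x1 = self.seq[0](x)
    x2 = self.seq[1](torch.cat([x, x1], dim=1))
    return self.seq[2](torch.cat([x, x1, x2], dim=1))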
def __init__(self, in_filters, conv_num):
    super(RatLesNetv2_ResNet_v2, self).__init__()
    self.conv_num = conv_num
    self.seq = []
    for i in range(conv_num):
        self.seq.append(nn.Sequential(
            ReLU(), BatchNorm3d(in_filters),
            Conv3d(in_filters, in_filters, 3, padding=1),
        ))
    self.seq = nn.ModuleList(self.seq)
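# Assumed forward for RatLesNetv2_ResNet_v2: each ReLU-BN-Conv stage is
# applied with an identity shortcut, conv_num times in a row.
def forward(self, x):
    for block in self.seq:
        x = x + block(x)
    return x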
def __init__(self, d_model):
    super(Convolutional_Feature_Extractor, self).__init__()
    # (N, Cin, D, H, W)
    self.conv3d1 = Conv3d(in_channels=3, out_channels=32, kernel_size=(5, 5, 5),
                          padding=(2, 2, 2))  # stride=(1, 2, 2)
    self.mp3d1 = MaxPool3d(kernel_size=(1, 2, 2))
    self.bn1 = BatchNorm3d(32)
    self.basicblock1 = BasicBlock3D(inplanes=32, outplanes=32)
    self.conv3d2 = Conv3d(in_channels=32, out_channels=64, kernel_size=(5, 5, 5),
                          padding=(2, 2, 2))  # stride=(1, 2, 2)
    self.mp3d2 = MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))  # (N, Cin, D, H', W')
    self.bn2 = BatchNorm3d(64)
    self.basicblock2 = BasicBlock3D(inplanes=64, outplanes=64)
    self.conv3d3 = Conv3d(in_channels=64, out_channels=96, kernel_size=(1, 5, 5),
                          padding=(0, 2, 2))  # stride=(1, 2, 2)
    self.mp3d3 = MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))  # (N, Cin, D, H', W')
    self.bn3 = BatchNorm3d(96)
    self.basicblock3 = BasicBlock3D(inplanes=96, outplanes=96)
    self.gap = AdaptiveAvgPool2d((1, 1))
    # self.linear = Linear(in_features=96*72, out_features=d_model)
    self.linear = Linear(in_features=96, out_features=d_model)
    self.bn = BatchNorm1d(d_model)
def __init__(self):
    super(ModelTransformNet, self).__init__()
    # Initial convolution layers
    self.conv1 = ConvLayer(1, 32, kernel_size=(3, 9, 9), stride=(1, 1, 1))
    self.in1 = BatchNorm3d(32)
    self.conv2 = ConvLayer(32, 64, kernel_size=(3, 3, 3), stride=(2, 2, 2))
    self.in2 = BatchNorm3d(64)
    self.conv3 = ConvLayer(64, 128, kernel_size=(3, 3, 3), stride=(1, 2, 2))
    self.in3 = BatchNorm3d(128)
    # Residual layers
    self.res1 = ResidualBlock(128, kernel_size=(3, 3, 3))
    self.res2 = ResidualBlock(128, kernel_size=(3, 3, 3))
    self.res3 = ResidualBlock(128, kernel_size=(3, 3, 3))
    self.res4 = ResidualBlock(128, kernel_size=(3, 3, 3))
    self.res5 = ResidualBlock(128, kernel_size=(3, 3, 3))
    # Upsampling layers
    self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=(3, 3, 3), stride=1, upsample=(1, 2, 2))
    self.in4 = BatchNorm3d(64)
    self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=(3, 3, 3), stride=1, upsample=(2, 2, 2))
    self.in5 = BatchNorm3d(32)
    self.deconv3 = ConvLayer(32, 1, kernel_size=(3, 9, 9), stride=1)
    # Non-linearities
    self.relu = ReLU()
def hijack_cgbn(m):
    if isinstance(m, SynchronizedBatchNorm3d):
        s = BatchNorm3d(num_features=m.num_features, eps=m.eps,
                        momentum=m.momentum, affine=m.affine)
        s.running_mean = m.running_mean
        s.running_var = m.running_var
        if m.affine:
            s.weight = m.weight
            s.bias = m.bias
        return s
    else:
        return m
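# hijack_cgbn converts one module at a time; replacing every
# SynchronizedBatchNorm3d in a model requires walking the module tree. A
# minimal recursive driver (an assumption, not part of the source):
def hijack_all(model):
    for name, child in model.named_children():
        hijack_all(child)
        setattr(model, name, hijack_cgbn(child))
    return model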
def _build_pytorch(self, features):
    import torch
    from torch.nn import BatchNorm1d, BatchNorm2d, BatchNorm3d
    if len(features.shape) == 2 or len(features.shape) == 3:
        self.bn = BatchNorm1d(num_features=features.shape[1])
    elif len(features.shape) == 4:
        self.bn = BatchNorm2d(num_features=features.shape[1])
    elif len(features.shape) == 5:
        self.bn = BatchNorm3d(num_features=features.shape[1])
    else:
        raise RuntimeError(
            "Batch norm not available for other input shapes than [B,L], [B,C,L], "
            "[B,C,H,W] or [B,C,D,H,W] dimensional.")
    if torch.cuda.is_available():
        self.bn = self.bn.to(torch.device("cuda"))  # TODO move to correct device
def build(self, features):
    if len(features.shape) == 2 or len(features.shape) == 3:
        self.bn = BatchNorm1d(num_features=features.shape[1])
    elif len(features.shape) == 4:
        self.bn = BatchNorm2d(num_features=features.shape[1])
    elif len(features.shape) == 5:
        self.bn = BatchNorm3d(num_features=features.shape[1])
    else:
        raise RuntimeError(
            "Batch norm not available for other input shapes than [B,L], [B,C,L], "
            "[B,C,H,W] or [B,C,D,H,W] dimensional.")
    # Follow the input tensor's device directly; the original guarded this with
    # torch.cuda.is_available(), which is redundant once features.device is used.
    self.bn = self.bn.to(features.device)
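# Usage sketch: build picks the BatchNorm variant from the input's rank, so a
# 5-D tensor [B, C, D, H, W] yields a BatchNorm3d over dimension 1. `Norm` is
# a hypothetical owner class for the method above.
layer = Norm()
layer.build(torch.randn(2, 16, 8, 32, 32))
print(type(layer.bn).__name__)  # BatchNorm3d, with num_features=16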
def get_net(self):
    return Sequential(
        Conv3d(3, 8, kernel_size=3, stride=1), BatchNorm3d(8), LeakyReLU(),
        Conv3d(8, 16, kernel_size=3, stride=1), BatchNorm3d(16), LeakyReLU(),
        Conv3d(16, 32, kernel_size=3, stride=1), BatchNorm3d(32), LeakyReLU(),
        Conv3d(32, 64, kernel_size=3, stride=1), BatchNorm3d(64), LeakyReLU(),
        Conv3d(64, 64, kernel_size=3, stride=2), BatchNorm3d(64), LeakyReLU(),
        Conv3d(64, 32, kernel_size=3, stride=1), BatchNorm3d(32), LeakyReLU(),
        Conv3d(32, 16, kernel_size=3, stride=1), BatchNorm3d(16), LeakyReLU(),
        Conv3d(16, 8, kernel_size=3, stride=1), BatchNorm3d(8), LeakyReLU(),
        Flatten(),
        Linear(5832, 2048), LeakyReLU(),
        Linear(2048, 2048), BatchNorm1d(2048), LeakyReLU(),
        Linear(2048, 1024), BatchNorm1d(1024), LeakyReLU(),
        Linear(1024, 512), BatchNorm1d(512), LeakyReLU(),
        Linear(512, 256), BatchNorm1d(256), LeakyReLU(),
        Linear(256, 128), BatchNorm1d(128), LeakyReLU(),
        Linear(128, self.output_dim), Sigmoid()
    )
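# Shape check for the network above (illustrative, not from the source): each
# stride-1 3x3x3 conv shaves 2 voxels, so 39 -> 31 over the first four convs,
# the stride-2 conv maps 31 -> 15, and the last three convs leave 9; Flatten
# then yields 8 * 9 * 9 * 9 = 5832 features, matching Linear(5832, 2048). The
# expected input is therefore a 3-channel 39x39x39 volume, with batch >= 2 so
# the BatchNorm1d layers can compute statistics in training mode.
# (`model` is hypothetical here.)
import torch
net = model.get_net()
out = net(torch.randn(2, 3, 39, 39, 39))  # -> shape [2, model.output_dim]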
def __init__(self, inplanes, planes, kernel_size, stride, padding=1, norm_layer=None):
    super(BasicBlock, self).__init__()
    self.net = Sequential(
        Conv3d(inplanes, planes, kernel_size=kernel_size, stride=stride,
               padding=padding, bias=False),
        BatchNorm3d(planes),
        LeakyReLU()
    )
def __init__(self, in_channels, out_channels, deconv=False, is_3d=False, bn=True,
             relu=True, **kwargs):
    super(BasicConv, self).__init__()
    # print(in_channels, out_channels, deconv, is_3d, bn, relu, kwargs)
    self.relu = relu
    self.use_bn = bn
    if is_3d:
        if deconv:
            self.conv = nn.ConvTranspose3d(in_channels, out_channels, bias=False, **kwargs)
        else:
            self.conv = nn.Conv3d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = BatchNorm3d(out_channels)
    else:
        if deconv:
            self.conv = nn.ConvTranspose2d(in_channels, out_channels, bias=False, **kwargs)
        else:
            self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = BatchNorm2d(out_channels)
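# Assumed forward for BasicConv: convolution, then the optional BatchNorm and
# ReLU that the `bn` and `relu` flags enable (note `self.relu` holds the flag,
# not a module, so the functional ReLU is used here).
def forward(self, x):
    x = self.conv(x)
    if self.use_bn:
        x = self.bn(x)
    if self.relu:
        x = nn.functional.relu(x, inplace=True)
    return x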
def create_norm(num_features):
    """Creates a normalization layer.

    Note:
        The normalization is configured via :attr:`pytorch_layers.Config.norm_mode`
        and :attr:`pytorch_layers.Config.norm_kwargs`, and the spatial dimension
        is configured via :attr:`pytorch_layers.Config.dim`.

    Args:
        num_features (int): The number of input channels.

    Returns:
        torch.nn.Module: The created normalization layer.

    """
    config = Config()
    if config.norm_mode is NormMode.GROUP:
        from torch.nn import GroupNorm
        kwargs = config.norm_kwargs.copy()
        num_groups = kwargs.pop('num_groups')
        return GroupNorm(num_groups, num_features, **kwargs)
    elif config.norm_mode is NormMode.NONE:
        from torch.nn import Identity
        return Identity()
    if config.dim is Dim.ONE:
        if config.norm_mode is NormMode.INSTANCE:
            from torch.nn import InstanceNorm1d
            return InstanceNorm1d(num_features, **config.norm_kwargs)
        elif config.norm_mode is NormMode.BATCH:
            from torch.nn import BatchNorm1d
            return BatchNorm1d(num_features, **config.norm_kwargs)
    elif config.dim is Dim.TWO:
        if config.norm_mode is NormMode.INSTANCE:
            from torch.nn import InstanceNorm2d
            return InstanceNorm2d(num_features, **config.norm_kwargs)
        elif config.norm_mode is NormMode.BATCH:
            from torch.nn import BatchNorm2d
            return BatchNorm2d(num_features, **config.norm_kwargs)
    elif config.dim is Dim.THREE:
        if config.norm_mode is NormMode.INSTANCE:
            from torch.nn import InstanceNorm3d
            return InstanceNorm3d(num_features, **config.norm_kwargs)
        elif config.norm_mode is NormMode.BATCH:
            from torch.nn import BatchNorm3d
            return BatchNorm3d(num_features, **config.norm_kwargs)
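# Usage sketch for create_norm; exactly how Config is populated is an
# assumption, but with batch mode and three dimensions the call should resolve
# to the BatchNorm3d branch:
config = Config()
config.norm_mode = NormMode.BATCH
config.dim = Dim.THREE
norm = create_norm(64)  # -> torch.nn.BatchNorm3d(64)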
def __init__(self, device, size, getRawData=False, batch=1, mode='udacity'):
    super(TSNENet, self).__init__()
    self.fc1 = Linear(8295, 128)  # 8374
    self.fc2 = Linear(475, 128)
    self.fc3 = Linear(88, 128)
    self.fc4 = Linear(512, 128)
    self.fc5 = Linear(512, 1024)
    self.conv1 = Conv3d(size, 64, kernel_size=(3, 12, 12), stride=(1, 6, 6))  # , padding=1
    self.conv2 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
    self.conv3 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
    self.conv4 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
    self.fc6 = Linear(1024, 512)
    self.fc7 = Linear(512, 256)
    self.fc8 = Linear(256, 128)
    self.fc9 = Linear(258, 128)
    self.fc10 = Linear(128, 15)
    self.lstm1 = LSTM(130, 128, 32)
    self.h1 = (torch.rand((32, 1, 128)) / 64).to(device)
    self.c1 = (torch.rand((32, 1, 128)) / 64).to(device)
    self.drop = Dropout3d(.05)
    self.elu = ELU()
    self.relu = ReLU()
    self.laynorm = GroupNorm(1, 128)
    self.bnorm1 = BatchNorm3d(64)
    self.bnorm2 = BatchNorm2d(64)
    self.bnorm3 = BatchNorm2d(64)
    self.bnorm4 = BatchNorm2d(64)
    self.pool1 = MaxPool2d(2)
    self.pool2 = MaxPool2d(2)
    self.getRawData = getRawData
    self.batch = batch
def __init__(self, embedding_dim, max_log_var=0.1):
    super(Encoder, self).__init__()
    self.embedding_dim = embedding_dim
    self.max_log_var = max_log_var
    self.emb_range_limit = Tanh()
    self.net = Sequential(
        Conv3d(1, 8, kernel_size=3, stride=1), BatchNorm3d(8), LeakyReLU(),
        Conv3d(8, 16, kernel_size=3, stride=1), BatchNorm3d(16), LeakyReLU(),
        Conv3d(16, 32, kernel_size=3, stride=1), BatchNorm3d(32), LeakyReLU(),
        Conv3d(32, 64, kernel_size=3, stride=1), BatchNorm3d(64), LeakyReLU(),
        Conv3d(64, 64, kernel_size=3, stride=2), BatchNorm3d(64), LeakyReLU(),
        Conv3d(64, 32, kernel_size=3, stride=1), BatchNorm3d(32), LeakyReLU(),
        Conv3d(32, 16, kernel_size=3, stride=1), BatchNorm3d(16), LeakyReLU(),
        Conv3d(16, 8, kernel_size=3, stride=1), BatchNorm3d(8), LeakyReLU(),
        Conv3d(8, 4, kernel_size=3, stride=1), BatchNorm3d(4), LeakyReLU(),
        Flatten())
def __init__(self, ksize, t_out, k_in_ch):
    super(MLP_basic, self).__init__()
    self.conv1 = Conv3d(in_channels=3, out_channels=1, kernel_size=(1, 7, 9), bias=False)
    self.bn1 = BatchNorm3d(1)
    self.conv2 = Conv2d(in_channels=k_in_ch, out_channels=1, kernel_size=ksize)
    self.fc1 = torch.nn.Linear(in_features=31, out_features=10)
    self.fc2 = torch.nn.Linear(in_features=10, out_features=10)
    self.fc_s = torch.nn.Linear(in_features=10, out_features=t_out)
    self.fc_r = torch.nn.Linear(in_features=10, out_features=t_out)
    self.fc_x = torch.nn.Linear(in_features=10, out_features=t_out)
    self.fc_y = torch.nn.Linear(in_features=10, out_features=t_out)
def __init__(self, num_in, num_mid, stride=(1, 1), kernel=1):
    super(FeatureInteractionGraph, self).__init__()
    self.num_s = int(2 * num_mid)
    self.num_n = int(1 * num_mid)
    kernel_size = (kernel, kernel, kernel)
    padding = 1 if kernel == 3 else 0
    # reduce dimension
    self.conv_state = Conv3d(num_in, self.num_s, kernel_size=kernel_size, padding=padding)
    # generate graph transformation function
    self.conv_proj = Conv3d(num_in, self.num_n, kernel_size=kernel_size, padding=padding)
    # ----------
    self.gcn = GCN(num_state=self.num_s, num_node=self.num_n)
    # ----------
    # tail: extend dimension
    self.fc_2 = Conv3d(self.num_s, num_in, kernel_size=kernel_size, padding=padding,
                       stride=1, groups=1, bias=False)
    self.blocker = BatchNorm3d(num_in)
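# Hedged forward sketch, following the GloRe-style graph reasoning this module
# implements: project voxels into num_n graph nodes with num_s states, reason
# in node space with the GCN, re-project to the grid, and add the
# BatchNorm3d-normalized result back as a residual. Not shown in the source;
# a node-count normalization term, if any, is omitted.
def forward(self, x):
    b = x.size(0)
    x_state = self.conv_state(x).view(b, self.num_s, -1)      # B x num_s x DHW
    x_proj = self.conv_proj(x).view(b, self.num_n, -1)        # B x num_n x DHW
    x_nodes = torch.matmul(x_state, x_proj.permute(0, 2, 1))  # B x num_s x num_n
    x_rel = self.gcn(x_nodes)                                 # reason over graph nodes
    x_back = torch.matmul(x_rel, x_proj)                      # B x num_s x DHW
    x_back = x_back.view(b, self.num_s, *x.shape[2:])
    return x + self.blocker(self.fc_2(x_back))                # residual fusion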