def __init__(self, inp_feat, out_feat, kernel=3, stride=1, padding=1, residual=None): super(Conv3D_Block, self).__init__() self.conv1 = Sequential( Conv3d(inp_feat, out_feat, kernel_size=kernel, stride=stride, padding=padding, bias=True), BatchNorm3d(out_feat), ReLU()) self.conv2 = Sequential( Conv3d(out_feat, out_feat, kernel_size=kernel, stride=stride, padding=padding, bias=True), BatchNorm3d(out_feat), ReLU()) self.residual = residual if self.residual is not None: self.residual_upsampler = Conv3d(inp_feat, out_feat, kernel_size=1, bias=False)
def __init__(self, in_filters, concat, growth_rate, dim_reduc=False, nonlinearity=torch.nn.functional.relu): super(RatLesNet_DenseBlock, self).__init__() self.concat = concat self.dim_reduc = dim_reduc self.act = nonlinearity self.convs = [] for i in range(self.concat): # Add every 3D convolution to self.convs self.convs.append( nn.Sequential( Conv3d(growth_rate * i + in_filters, growth_rate, 3, stride=1, padding=1), nonlinearity)) self.convs = nn.ModuleList(self.convs) # Output shape of the current DenseBLock self.out_shape = growth_rate * i + in_filters # Reduce dimensions at the end of the block if needed if self.dim_reduc: self.reduc_conv = nn.Sequential( Conv3d(growth_rate * (i + 1) + in_filters, in_channels, 3, stride=1, padding=1), nonlinearity) self.out_shape = in_filters
def __init__(self, config):
    """VoxResNet: conv stem plus three strided residual stages; four
    transposed-conv heads upsample each stage's features to 2-channel maps.
    Output paddings below are tuned for the CR dataset (18 slices)."""
    super(VoxResNet, self).__init__()
    # Stem: full 3D conv then an in-plane (1,3,3) conv.
    self.seq1 = nn.Sequential(
        Conv3d(1, 32, 3, padding=1),
        BatchNorm3d(32),
        ReLU(),
        Conv3d(32, 32, (1, 3, 3), padding=(0, 1, 1)))
    # Each stage: pre-activation, strided conv downsampling (replacing the
    # commented-out MaxPool3d(2)), then two residual blocks.
    self.seq2 = nn.Sequential(
        BatchNorm3d(32),
        ReLU(),
        Conv3d(32, 64, 3, padding=1, stride=2),
        VoxResNet_ResBlock(),
        VoxResNet_ResBlock())
    self.seq3 = nn.Sequential(
        BatchNorm3d(64),
        ReLU(),
        Conv3d(64, 64, 3, padding=1, stride=2),
        VoxResNet_ResBlock(),
        VoxResNet_ResBlock())
    self.seq4 = nn.Sequential(
        BatchNorm3d(64),
        ReLU(),
        Conv3d(64, 64, 3, padding=1, stride=2),
        VoxResNet_ResBlock(),
        VoxResNet_ResBlock())
    # For the Leiden dataset (16 slices) the deeper heads instead used
    # isotropic output_padding: 3 for the stride-4 head, 7 for the stride-8 head.
    # For CR dataset, 18 slices:
    self.transposed1 = ConvTranspose3d(32, 2, 3, padding=1)
    self.transposed2 = ConvTranspose3d(64, 2, 3, stride=2, padding=1, output_padding=1)
    self.transposed3 = ConvTranspose3d(64, 2, 3, stride=4, padding=1, output_padding=(1, 3, 3))
    self.transposed4 = ConvTranspose3d(64, 2, 3, stride=8, padding=1, output_padding=(1, 7, 7))
def __init__(self, in_channels, out_channels, nonlinearity=None): super(Bottleneck3d, self).__init__() if nonlinearity != None: self.conv = nn.Sequential(Conv3d(in_channels, out_channels, 1), nonlinearity) else: self.conv = nn.Sequential(Conv3d(in_channels, out_channels, 1), )
def __init__(self): super(VoxResNet_ResBlock, self).__init__() self.seq = nn.Sequential(BatchNorm3d(64), ReLU(), Conv3d(64, 64, (1, 3, 3), padding=(0, 1, 1)), BatchNorm3d(64), ReLU(), Conv3d(64, 64, (3, 3, 3), padding=1))
def __init__(self, filters): super(UNet3D_ConvBlock, self).__init__() self.seq = nn.Sequential( Conv3d(filters[0], filters[1], 3, padding=1), BatchNorm3d(filters[1]), ReLU(), Conv3d(filters[1], filters[2], 3, padding=1), BatchNorm3d(filters[2]), ReLU(), )
def __init__(self, in_dim): super(PAM_Module, self).__init__() self.chanel_in = in_dim self.query_conv = Conv3d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1) self.key_conv = Conv3d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1) self.value_conv = Conv3d(in_channels=in_dim, out_channels=in_dim, kernel_size=1) self.gamma = Parameter(torch.zeros(1)) self.softmax = Softmax(dim=-1)
def __init__(self, in_filters, bottleneck_filters): super(RatLesNetv2_SE1, self).__init__() self.seq = nn.Sequential( ReLU(), Conv3d(in_filters, bottleneck_filters, 1), ReLU(), Conv3d(bottleneck_filters, in_filters, 1), Sigmoid() )
def __init__(self, in_channels=3, out_channels=5): super(VGG3D, self).__init__() self.conv1 = Sequential( Conv3d(in_channels, 64, kernel_size=(1, 3, 3), padding=(0, 1, 1)), InstanceNorm3d(64, True), ReLU(True), Conv3d(64, 64, kernel_size=(1, 3, 3), padding=(0, 1, 1)), InstanceNorm3d(64, True), ReLU(True), Conv3d(64, 64, kernel_size=(1, 3, 3), padding=(0, 1, 1)), InstanceNorm3d(64, True), ReLU(True), MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))) self.conv2 = Sequential( Conv3d(64, 128, kernel_size=(1, 3, 3), padding=(0, 1, 1)), InstanceNorm3d(128, True), ReLU(True), Conv3d(128, 128, kernel_size=(1, 3, 3), padding=(0, 1, 1)), InstanceNorm3d(128, True), ReLU(True), Conv3d(128, 128, kernel_size=(1, 3, 3), padding=(0, 1, 1)), InstanceNorm3d(128, True), ReLU(True), MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2))) self.conv3 = Sequential(Conv3d(128, 256, kernel_size=3, padding=1), InstanceNorm3d(256, True), ReLU(True), Conv3d(256, 256, kernel_size=3, padding=1), InstanceNorm3d(256, True), ReLU(True), Conv3d(256, 256, kernel_size=3, padding=1), InstanceNorm3d(256, True), ReLU(True), MaxPool3d(2, stride=2)) self.avgpool = AdaptiveAvgPool3d(1, 1, 1) self.fc = Linear(256, out_channels)
def __init__(self, in_filters): super(ResNet, self).__init__() self.seq = nn.Sequential( ReLU(), BatchNorm3d(in_filters), Conv3d(in_filters, in_filters, 3, padding=1), ReLU(), BatchNorm3d(in_filters), Conv3d(in_filters, in_filters, 3, padding=1) )
def __init__(self, num_channels=1, feat_channels=[4, 8, 16, 32, 64], residual='conv'):
    """3D U-Net: five Conv3D_Block encoder levels with max-pool downsampling
    and a Deconv3D_Block decoder with skip concatenation, plus a 2D
    ResNet-style stem.

    residual: 'conv' routes each block's input through a 1x1 conv for a
    residual connection; None removes the residuals.

    NOTE(review): the 2D layers (conv1/bn1/layer1-3, BasicBlock-based) coexist
    with the 3D U-Net parts; which path forward() uses cannot be determined
    from this block alone.
    """
    super(UNet3D, self).__init__()

    # --- 2D ResNet-style stem ---
    layers = [2, 2, 2, 2]
    block = BasicBlock
    self.inplanes = 16
    self.dilation = 1
    self.groups = 1
    self.base_width = 64
    self.conv1 = nn.Conv2d(9, self.inplanes, kernel_size=3, stride=2, padding=1, bias=False)
    self.bn1 = nn.BatchNorm2d(self.inplanes)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
    self.layer1 = self._make_layer(block, 32, layers[0])
    self.layer2 = self._make_layer(block, 64, layers[1], stride=2, dilate=False)
    self.layer3 = self._make_layer(block, 128, layers[2], stride=2, dilate=False)

    # --- Encoder downsamplers ---
    for lvl in range(1, 5):
        setattr(self, f"pool{lvl}", MaxPool3d(kernel_size=3, stride=2, padding=1))

    # --- Encoder convolutions: widths walk up feat_channels ---
    widths = [num_channels] + list(feat_channels)
    for lvl in range(1, 6):
        setattr(self, f"conv_blk{lvl}",
                Conv3D_Block(widths[lvl - 1], widths[lvl], residual=residual))

    # --- Decoder convolutions (2x input channels from skip concatenation) ---
    for lvl in range(4, 0, -1):
        setattr(self, f"dec_conv_blk{lvl}",
                Conv3D_Block(2 * feat_channels[lvl - 1], feat_channels[lvl - 1],
                             residual=residual))

    # --- Decoder upsamplers ---
    for lvl in range(4, 0, -1):
        setattr(self, f"deconv_blk{lvl}",
                Deconv3D_Block(feat_channels[lvl], feat_channels[lvl - 1]))

    # Final 1x1 conv segmentation maps.
    self.one_conv = Conv3d(feat_channels[0], num_channels,
                           kernel_size=1, stride=1, padding=0, bias=True)
    self.one_one_conv = Conv3d(8, num_channels,
                               kernel_size=1, stride=1, padding=0, bias=True)
    # Activation function.
    self.activation = Sigmoid()
def __init__(self, in_filters, concat, growth_rate, dim_reduc=False, nonlinearity=torch.nn.functional.relu): super(RatLesNet_ResNetBlock, self).__init__() self.seq = nn.Sequential(ReLU(), BatchNorm3d(in_filters), Conv3d(in_filters, in_filters, 3, padding=1), ReLU(), BatchNorm3d(in_filters), Conv3d(in_filters, in_filters, 3, padding=1))
def __init__(self, n_deconvfilter, h_shape):
    """Decoder of five same-padding 3x3x3 Conv3d layers (conv7..conv11)
    walking down `n_deconvfilter`, with a 2x unpool and LeakyReLU.

    Args:
        n_deconvfilter: at least six channel counts, consumed pairwise.
        h_shape: accepted but not used in this constructor.
    """
    print("\nInitializing \"Decoder\"")
    super(decoder, self).__init__()

    def _same_conv(i, k=3):
        # padding (k - 1) / 2 preserves spatial extent for odd kernels.
        return Conv3d(in_channels=n_deconvfilter[i],
                      out_channels=n_deconvfilter[i + 1],
                      kernel_size=k,
                      padding=(k - 1) // 2)

    self.conv7 = _same_conv(0)
    self.conv8 = _same_conv(1)
    self.conv9 = _same_conv(2)
    self.conv10 = _same_conv(3)
    self.conv11 = _same_conv(4)
    # Pooling layer (project-defined 2x unpool).
    self.unpool3d = Unpool3DLayer(unpool_size=2)
    # Nonlinearity of the network.
    self.leaky_relu = LeakyReLU(negative_slope=0.01)
def __init__(self, device, size, getRawData=False, mode='udacity'): super(Challenge, self).__init__() if mode == 'udacity': self.fc1 = Linear(8295, 128) self.fc2 = Linear(1938, 128) self.fc3 = Linear(408, 128) self.fc4 = Linear(4480, 128) self.fc5 = Linear(4480, 1024) else: self.fc1 = Linear(6195, 128) self.fc2 = Linear(1428, 128) self.fc3 = Linear(288, 128) self.fc4 = Linear(2560, 128) self.fc5 = Linear(2560, 1024) self.conv1 = Conv3d(size, 64, kernel_size=(3, 12, 12), stride=(1, 6, 6)) self.conv2 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2)) self.conv3 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2)) self.conv4 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2)) self.fc6 = Linear(1024, 512) self.fc7 = Linear(512, 256) self.fc8 = Linear(256, 128) self.fc9 = Linear(258, 1) self.lstm1 = LSTM(130, 128, 32) self.h1 = torch.zeros(32, 1, 128).to(device) self.c1 = torch.zeros(32, 1, 128).to(device) self.drop = Dropout3d(.25) self.elu = ELU() self.relu = ReLU() self.laynorm = GroupNorm(1, 128) self.getRawData = getRawData
def __init__(self, config):
    """RatLesNet-v2 (dense variant): 1x1 conv stem, three dense-block encoder
    stages with ceil-mode 2x max-pooling, then three decoder dense blocks,
    each preceded by a channel bottleneck; the last bottleneck outputs 2 maps."""
    super(RatLesNet_v2_DenseNet, self).__init__()
    act = config["act"]
    nfi = config["first_filters"]
    nfi2 = nfi * 2
    conv_num = config["block_convs"]
    # NOTE(review): `act` and `conv_num` are read from config but not
    # forwarded to any submodule -- confirm whether that is intended.
    self.conv1 = Conv3d(1, nfi, 1)
    # Encoder: dense block followed by 2x downsampling (ceil_mode keeps odd sizes).
    self.block1 = RatLesNetv2_DenseNet(nfi)
    self.mp1 = nn.MaxPool3d(2, ceil_mode=True)
    self.block2 = RatLesNetv2_DenseNet(nfi)
    self.mp2 = nn.MaxPool3d(2, ceil_mode=True)
    self.block3 = RatLesNetv2_DenseNet(nfi)
    self.mp3 = nn.MaxPool3d(2, ceil_mode=True)
    # Decoder: a bottleneck adjusts channel counts before each dense block.
    self.bottleneck1 = RatLesNetv2_Bottleneck(nfi, nfi)
    self.block4 = RatLesNetv2_DenseNet(nfi2)
    self.bottleneck2 = RatLesNetv2_Bottleneck(nfi2, nfi)
    self.block5 = RatLesNetv2_DenseNet(nfi2)
    self.bottleneck3 = RatLesNetv2_Bottleneck(nfi2, nfi)
    self.block6 = RatLesNetv2_DenseNet(nfi2)
    self.bottleneck4 = RatLesNetv2_Bottleneck(nfi2, 2)
def __init__(self, num_channels=1, feat_channels=[64, 256, 256, 512, 1024], residual='conv'):
    """3D U-Net: five Conv3D_Block encoder levels with 2x max-pool
    downsampling, Deconv3D_Block upsampling with skip concatenation, and a
    1x1 output conv followed by Sigmoid.

    residual: 'conv' adds a 1x1-conv residual path inside every block;
    None disables the residuals.
    """
    super(UNet, self).__init__()
    # Encoder downsamplers.
    for lvl in range(1, 5):
        setattr(self, f"pool{lvl}", MaxPool3d((2, 2, 2)))
    # Encoder convolutions: widths walk up feat_channels.
    widths = [num_channels] + list(feat_channels)
    for lvl in range(1, 6):
        setattr(self, f"conv_blk{lvl}",
                Conv3D_Block(widths[lvl - 1], widths[lvl], residual=residual))
    # Decoder convolutions: inputs are doubled by skip concatenation.
    for lvl in range(4, 0, -1):
        setattr(self, f"dec_conv_blk{lvl}",
                Conv3D_Block(2 * feat_channels[lvl - 1], feat_channels[lvl - 1],
                             residual=residual))
    # Decoder upsamplers.
    for lvl in range(4, 0, -1):
        setattr(self, f"deconv_blk{lvl}",
                Deconv3D_Block(feat_channels[lvl], feat_channels[lvl - 1]))
    # Final 1x1 conv segmentation map and activation.
    self.one_conv = Conv3d(feat_channels[0], num_channels,
                           kernel_size=1, stride=1, padding=0, bias=True)
    self.sigmoid = Sigmoid()
def __init__(self): super(Conv3, self).__init__() self.conv2 = Conv3d(in_channels=10, out_channels=10, kernel_size=5, padding=2, bias=True)
def __init__(self, ch_in=32):
    """Dual attention head: position (PAM) and channel (CAM) attention run in
    parallel; a 1x1 conv fuses their concatenated outputs back to `ch_in`."""
    super().__init__()
    self.pam = PAM_Module(in_dim=ch_in)
    self.cam = CAM_Module(in_dim=ch_in)
    # 2*ch_in inputs: the two attention outputs are concatenated channel-wise.
    self.out_conv = Conv3d(in_channels=ch_in * 2, out_channels=ch_in, kernel_size=1)
def __init__(self, config):
    """RatLesNet-v2 (ResNet variant): 1x1 conv stem; three encoder stages of
    ResNet block + SE gate + ceil-mode 2x max-pool; three decoder stages of
    bottleneck + ResNet block; the final bottleneck outputs 2 channels."""
    super(RatLesNet_v2_v2, self).__init__()
    act = config["act"]
    nfi = config["first_filters"]
    nfi2 = nfi * 2
    # NOTE(review): `act` and `nfi2` are computed but unused in this variant
    # (all blocks below take `nfi`) -- confirm.
    self.conv1 = Conv3d(1, nfi, 1)
    self.block1 = RatLesNetv2_ResNet(nfi)
    self.gate1 = RatLesNetv2_SE1(nfi, nfi // 8)
    self.mp1 = nn.MaxPool3d(2, ceil_mode=True)
    self.block2 = RatLesNetv2_ResNet(nfi)
    self.gate2 = RatLesNetv2_SE1(nfi, nfi // 8)
    self.mp2 = nn.MaxPool3d(2, ceil_mode=True)
    self.block3 = RatLesNetv2_ResNet(nfi)
    self.gate3 = RatLesNetv2_SE1(nfi, nfi // 8)
    self.mp3 = nn.MaxPool3d(2, ceil_mode=True)
    self.bottleneck1 = RatLesNetv2_Bottleneck(nfi, nfi)
    self.block4 = RatLesNetv2_ResNet(nfi)
    self.bottleneck2 = RatLesNetv2_Bottleneck(nfi, nfi)
    self.block5 = RatLesNetv2_ResNet(nfi)
    self.bottleneck3 = RatLesNetv2_Bottleneck(nfi, nfi)
    self.block6 = RatLesNetv2_ResNet(nfi)
    self.bottleneck4 = RatLesNetv2_Bottleneck(nfi, 2)
def __init__(self, device, size, outNum, batch=None): super(Challenge, self).__init__() self.fc1 = Linear(8295, 128) self.fc2 = Linear(1938, 128) self.fc3 = Linear(408, 128) self.fc4 = Linear(4480, 128) self.fc5 = Linear(4480, 1024) self.conv1 = Conv3d(size, 64, kernel_size=(3, 12, 12), stride=(1, 6, 6)) self.conv2 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2)) self.conv3 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2)) self.conv4 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2)) self.fc6 = Linear(1024, 512) self.fc7 = Linear(512, 256) self.fc8 = Linear(256, 128) self.fc9 = Linear(258, outNum) self.lstm1 = LSTM(130, 128, 32) self.h1 = (torch.rand((32, 1, 128)) / 64).to(device) self.c1 = (torch.rand((32, 1, 128)) / 64).to(device) self.drop = Dropout3d(.25) self.elu = ELU() self.relu = ReLU() self.laynorm = GroupNorm(1, 128)
def __init__(self, in_channels, out_channels, kernel_size, stride):
    """Circularly-padded 3D convolution: pads each spatial dimension by
    floor(k/2) before an unpadded Conv3d, so odd kernels preserve extent.

    Fixes: removed the leftover debugger anchor `stop = 1`; replaced
    `int(np.floor(x / 2))` with the equivalent integer division `x // 2`.

    Args:
        in_channels / out_channels: conv channel counts.
        kernel_size: per-dimension kernel sizes (iterable of ints).
        stride: conv stride.
    """
    super(ConvLayer, self).__init__()
    # Per-dimension pad amount so the conv output keeps the input extent.
    pad = [k // 2 for k in kernel_size]
    # NOTE(review): `CirularPad3d` (sic) is a project-defined layer; the
    # misspelled name is kept as that is how it is declared elsewhere.
    self.circular_pad = CirularPad3d(pad)
    self.conv3d = Conv3d(in_channels, out_channels, kernel_size, stride)
def __init__(
    self,
    conv3d_eq: nn.Conv3d,
    input_THW_tuple: Tuple,
):
    """Build an nn.Conv2d equivalent to `conv3d_eq` and copy its weights,
    squeezing out the temporal axis of the kernel.

    Args:
        conv3d_eq (nn.Module): input nn.Conv3d module to be converted into
            equivalent conv2d.
        input_THW_tuple (tuple): input THW size for conv3d_eq during forward.
    """
    super().__init__()
    has_bias = conv3d_eq.bias is not None
    # Mirror the spatial (H, W) components of every conv3d hyperparameter;
    # index 0 (temporal) is dropped.
    self.conv2d_eq = nn.Conv2d(
        conv3d_eq.in_channels,
        conv3d_eq.out_channels,
        kernel_size=conv3d_eq.kernel_size[1:],
        stride=conv3d_eq.stride[1:],
        groups=conv3d_eq.groups,
        bias=has_bias,
        padding=conv3d_eq.padding[1:],
        dilation=conv3d_eq.dilation[1:],
    )
    # Copy the weights, removing the (size-1) temporal kernel axis.
    state_dict = conv3d_eq.state_dict()
    state_dict["weight"] = state_dict["weight"].squeeze(2)
    self.conv2d_eq.load_state_dict(state_dict)
    self.input_THW_tuple = input_THW_tuple
def __init__(self): super(TestModule3d, self).__init__() self.conv1 = Conv3d(in_channels=1, out_channels=4, kernel_size=[3, 3, 3], bias=False) self.nonlin1 = ReLU() self.conv2 = Conv3d(in_channels=4, out_channels=8, kernel_size=[3, 3, 3], bias=False) self.nonlin2 = ReLU() self.conv3 = Conv3d(in_channels=8, out_channels=1, kernel_size=[3, 3, 3], bias=False) self.nonlin3 = ReLU()
def __init__(self, in_filters, out_filters): super(Bottleneck, self).__init__() self.seq = nn.Sequential( ReLU(), BatchNorm3d(in_filters), Conv3d(in_filters, out_filters, 1) )
def __init__(self, inc, outc, kernel_size=3, mode="3d"):
    """Convolution block with three layouts: in-plane 2D ("2d"), full 3D
    ("3d"), or factorized pseudo-3D ("p3d": spatial conv followed by a
    1x1xk conv along the third axis), each with instance normalisation.

    Fix: corrected the "Unknow mode" typo in the error message; the mutually
    exclusive mode checks are now an if/elif chain.

    Args:
        inc / outc: input / output channel counts.
        kernel_size: odd kernel edge length; padding keeps spatial extent.
        mode: one of "2d"/"2D", "3d"/"3D", "p3d"/"P3D".
    Raises:
        ValueError: if `mode` is not an accepted spelling.
    """
    super(ConvBlock, self).__init__()
    if mode not in ["2d", "2D", "3d", "3D", "p3d", "P3D"]:
        raise ValueError("Unknown mode for convolution")
    pad_size = (kernel_size - 1) // 2
    if mode in ["2d", "2D"]:
        # Spatial-only kernel; the third dimension is untouched.
        self.conv1 = Conv3d(inc, outc, kernel_size=(kernel_size, kernel_size, 1),
                            stride=1, padding=(pad_size, pad_size, 0), bias=False)
        self.conv = self.conv2d
    elif mode in ["3d", "3D"]:
        self.conv1 = Conv3d(inc, outc, kernel_size=kernel_size,
                            stride=1, padding=pad_size, bias=False)
        self.conv = self.conv3d
    else:  # "p3d" / "P3D"
        # Factorized: (k, k, 1) spatial conv then a (1, 1, k) conv.
        self.conv1 = Conv3d(inc, outc, kernel_size=(kernel_size, kernel_size, 1),
                            stride=1, padding=(pad_size, pad_size, 0), bias=False)
        self.conv2 = Conv3d(outc, outc, kernel_size=(1, 1, kernel_size),
                            stride=1, padding=(0, 0, pad_size), bias=False)
        self.conv = self.convp3d
    self.norm = InstanceNorm3d(outc)
def __init__(self):
    """Decoder of five same-padding 3x3x3 Conv3d layers stepping through
    channel counts [128, 128, 128, 64, 32, 2], with a 2x unpool,
    LeakyReLU and LogSoftmax."""
    print("\ninitializing \"decoder\"")
    super(decoder, self).__init__()
    self.n_deconvfilter = [128, 128, 128, 64, 32, 2]

    def _same_conv(i, k=3):
        # padding (k - 1) / 2 preserves spatial extent for odd kernels.
        return Conv3d(in_channels=self.n_deconvfilter[i],
                      out_channels=self.n_deconvfilter[i + 1],
                      kernel_size=k,
                      padding=(k - 1) // 2)

    self.conv1 = _same_conv(0)
    self.conv2 = _same_conv(1)
    self.conv3 = _same_conv(2)
    self.conv4 = _same_conv(3)
    self.conv5 = _same_conv(4)
    # Pooling layer (project-defined 2x unpool).
    self.unpool3d = Unpool3DLayer(unpool_size=2)
    # Nonlinearities of the network.
    self.leaky_relu = LeakyReLU(negative_slope=0.01)
    # NOTE(review): LogSoftmax without `dim` relies on deprecated
    # implicit-dim behaviour -- confirm the intended axis.
    self.log_softmax = nn.LogSoftmax()
def __init__(self): super(ConvFrontend, self).__init__() self.conv = Conv3d(1, 64, (5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3)) self.norm = BatchNorm3d(64) self.pool = MaxPool3d((1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
def __init__(self, bottom_send, bottom_receive, feat_num): super(CGACRF, self).__init__() self.atten = Conv3d(in_channels=bottom_send + bottom_receive, out_channels=feat_num, kernel_size=3, stride=1, padding=1) self.norm_atten = Sigmoid() self.message = Conv3d(in_channels=bottom_send, out_channels=feat_num, kernel_size=3, stride=1, padding=1) self.scale = Conv3d(in_channels=feat_num, out_channels=bottom_receive, kernel_size=1, bias=True)
def __init__(self, z_num: int):
    """Latent decoder: a linear layer expands a z_num-dim code to a
    64-channel 6x9x6 volume, followed by 4 repeats of 4 same-padding 3x3x3
    convs and a final conv down to 3 channels."""
    super().__init__()
    self.z_num = z_num  # self.z_num = 3 for smoke3_vel_buo scene
    self.filters = 64
    self.fc = Linear(self.z_num, self.filters * 6 * 9 * 6)
    # self.fc = Linear(3, self.filters * 4 * 8 * 14) for smoke3_vel_buo scene
    self.repeat_num = 4
    self.num_conv = 4
    # repeat_num * num_conv identical width-preserving convolutions.
    self.convs = ModuleList(
        Conv3d(self.filters, self.filters, kernel_size=3, padding=1)
        for _ in range(self.repeat_num * self.num_conv)
    )
    self.last_conv = Conv3d(self.filters, 3, kernel_size=3, padding=1)
def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, padding=0, activation=ReLU(inplace=True), pooling=MaxPool3d(kernel_size=2), same_padding=False): super(ConvBlock3D, self).__init__() self.conv = Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, dilation=dilation, padding=padding) self.activation = activation self.pooling = pooling self.same_padding = same_padding