Example #1
    def __init__(self, classes_num=3):
        super(Net_3d_Scale, self).__init__()
        classes = classes_num
        self.img_in_block = NetInBlock(4, 24, 2)
        self.img_down_block1 = NetDownBlock(24, 48, 2)#48*48*48
        self.img_down_block2 = NetDownBlock(48, 64, 2)#24*24*24
        self.img_down_block3 = NetDownBlock(64, 128, 2)#12*12*12
        self.img_down_block4 = NetDownBlock(128, 256, 2)#6*6*6

        self.def_in_block = NetInBlock(6, 24, 2)
        self.def_down_block1 = NetDownBlock(24, 48, 2)#48*48*48
        self.def_down_block2 = NetDownBlock(48, 64, 2)#24*24*24
        self.def_down_block3 = NetDownBlock(64, 128, 2)#12*12*12
        self.def_down_block4 = NetDownBlock(128, 256, 2)#6*6*6

        self.fc_block1 = nn.AdaptiveMaxPool3d((1,1,1))
        self.fc_block2 = nn.Linear(512, 1)
        self.fc_block3 = nn.Sigmoid()

        self.down_24_1 = nn.Sequential()
        self.down_24_1.add_module('conv_1', nn.Conv3d(7,24,kernel_size=3, stride=2, padding=1))
        self.down_24_1.add_module('conv_2', nn.Conv3d(24,48,kernel_size=3, stride=2))
        self.down_24_1.add_module('conv_3', nn.AdaptiveMaxPool3d((1,1,1)))

        
        self.up_block3 = NetUpBlock_DI(512, 128, 128, 256, 2)#12
        self.up_block4 = NetUpBlock_DI(256, 64, 64, 160, 2)#24
        self.out24_1 = NetInBlock(160, 64, 2)
        self.out24_2 = NetInBlock(64, 32, 2)
        self.out24_3 = NetOutSingleBlock(32, classes)
        self.up_block5 = NetJustUpBlock(160, 80, 2)#48
        self.out48_1 = NetInBlock(80, 32, 2)
        self.out48_2 = NetInBlock(64, 32, 2)
        self.out48_3 = NetOutSingleBlock(32, classes)

        self.down_48_1 = nn.Sequential()
        self.down_48_1.add_module('conv_1', nn.Conv3d(7,24,kernel_size=3, stride=2, padding=1))
        self.down_48_1.add_module('conv_2', nn.Conv3d(24,48,kernel_size=3, stride=2, padding=1))
        self.down_48_1.add_module('conv_3', nn.Conv3d(48,96,kernel_size=3, stride=2))
        self.down_48_1.add_module('conv_4', nn.AdaptiveMaxPool3d((1,1,1)))

        self.up_block6 = NetJustUpBlock(80, 32, 2)#96
        self.out96_1 = NetInBlock(64, 32, 1)
        self.out96_block = NetOutSingleBlock(32, classes)

        self.down_96_1 = nn.Sequential()
        self.down_96_1.add_module('conv_1', nn.Conv3d(7,24,kernel_size=3, stride=2, padding=1))
        self.down_96_1.add_module('conv_2', nn.Conv3d(24,48,kernel_size=3, stride=2, padding=1))
        self.down_96_1.add_module('conv_3', nn.Conv3d(48,96,kernel_size=3, stride=2, padding=1))
        self.down_96_1.add_module('conv_4', nn.Conv3d(96,192,kernel_size=3, stride=2))
        self.down_96_1.add_module('conv_5', nn.AdaptiveMaxPool3d((1,1,1)))

        self.fc_def2 = nn.Linear(336, 1)


        self.warp_layer96 = SpatialTransformer(96,96,96)
        self.warp_layer48 = SpatialTransformer(48,48,48)
        self.warp_layer24 = SpatialTransformer(24,24,24)

        self.upsample = nn.Upsample(scale_factor=2, mode='trilinear')#Interpolate(scale_factor=(2, 2, 2), mode='trilinear')
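A minimal standalone sketch (not from the model above; names here are illustrative) of why the fc head can follow nn.AdaptiveMaxPool3d((1, 1, 1)): the pool collapses any (D, H, W) extent to (1, 1, 1), so nn.Linear always sees a fixed feature size.

import torch
import torch.nn as nn

# Two different input resolutions, same flattened feature size.
pool = nn.AdaptiveMaxPool3d((1, 1, 1))
fc = nn.Linear(512, 1)
for shape in [(2, 512, 6, 6, 6), (2, 512, 3, 5, 7)]:
    x = torch.randn(*shape)            # (N, C, D, H, W)
    y = pool(x).flatten(1)             # (N, 512)
    print(torch.sigmoid(fc(y)).shape)  # torch.Size([2, 1])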
Example #2
    def __init__(self,
                 lfb_cfg,
                 fbo_cfg,
                 temporal_pool_type='avg',
                 spatial_pool_type='max'):
        super().__init__()
        fbo_type = fbo_cfg.pop('type', 'non_local')
        assert fbo_type in FBOHead.fbo_dict
        assert temporal_pool_type in ['max', 'avg']
        assert spatial_pool_type in ['max', 'avg']

        self.lfb_cfg = copy.deepcopy(lfb_cfg)
        self.fbo_cfg = copy.deepcopy(fbo_cfg)

        self.lfb = LFB(**self.lfb_cfg)
        self.fbo = self.fbo_dict[fbo_type](**self.fbo_cfg)

        # Pool by default
        if temporal_pool_type == 'avg':
            self.temporal_pool = nn.AdaptiveAvgPool3d((1, None, None))
        else:
            self.temporal_pool = nn.AdaptiveMaxPool3d((1, None, None))
        if spatial_pool_type == 'avg':
            self.spatial_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
        else:
            self.spatial_pool = nn.AdaptiveMaxPool3d((None, 1, 1))
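For reference, a small standalone sketch (plain PyTorch, no mmaction dependencies) of what the pooling shapes above mean: a None entry in the output size leaves that dimension's input extent unchanged.

import torch
import torch.nn as nn

x = torch.randn(1, 16, 4, 7, 7)  # (N, C, T, H, W)
print(nn.AdaptiveMaxPool3d((1, None, None))(x).shape)  # torch.Size([1, 16, 1, 7, 7])
print(nn.AdaptiveMaxPool3d((None, 1, 1))(x).shape)     # torch.Size([1, 16, 4, 1, 1])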
Example #3
    def __init__(
            self,
            temporal_pool_type='avg',
            spatial_pool_type='max',
            in_channels=2048,
            focal_gamma=0.,
            focal_alpha=1.,
            num_classes=81,  # First class reserved (BBox as pos/neg)
            dropout_ratio=0,
            dropout_before_pool=True,
            topk=(3, 5),
            multilabel=True):

        super(BBoxHeadAVA, self).__init__()
        assert temporal_pool_type in ['max', 'avg']
        assert spatial_pool_type in ['max', 'avg']
        self.temporal_pool_type = temporal_pool_type
        self.spatial_pool_type = spatial_pool_type

        self.in_channels = in_channels
        self.num_classes = num_classes

        self.dropout_ratio = dropout_ratio
        self.dropout_before_pool = dropout_before_pool

        self.multilabel = multilabel

        self.focal_gamma = focal_gamma
        self.focal_alpha = focal_alpha

        if topk is None:
            self.topk = ()
        elif isinstance(topk, int):
            self.topk = (topk, )
        elif isinstance(topk, tuple):
            assert all([isinstance(k, int) for k in topk])
            self.topk = topk
        else:
            raise TypeError('topk should be int or tuple[int], '
                            f'but got {type(topk)}')
        # Class 0 is ignored when calculating accuracy,
        #      so topk cannot be equal to num_classes.
        assert all([k < num_classes for k in self.topk])

        in_channels = self.in_channels
        # Pool by default
        if self.temporal_pool_type == 'avg':
            self.temporal_pool = nn.AdaptiveAvgPool3d((1, None, None))
        else:
            self.temporal_pool = nn.AdaptiveMaxPool3d((1, None, None))
        if self.spatial_pool_type == 'avg':
            self.spatial_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
        else:
            self.spatial_pool = nn.AdaptiveMaxPool3d((None, 1, 1))

        if dropout_ratio > 0:
            self.dropout = nn.Dropout(dropout_ratio)

        self.fc_cls = nn.Linear(in_channels, num_classes)
        self.debug_imgs = None
Example #4
 def __init__(self):
     super(_3DCNN, self).__init__()  # standard class inheritance; super(Cls, self) is the Python 2 spelling, but it also works in Python 3
     self.conv1 = nn.Conv3d(
         1, 8, kernel_size=3, stride=1,
         padding=1)  # in_channels, out_channels, kernel_size=3*3*3
     self.conv2 = nn.Conv3d(8, 16, kernel_size=3, stride=1, padding=1)
     self.conv3 = nn.Conv3d(16, 32, kernel_size=3, stride=1, padding=1)
     self.conv4 = nn.Conv3d(32, 64, kernel_size=3, stride=1, padding=1)
     self.conv5 = nn.Conv3d(64, 128, kernel_size=3, stride=1, padding=1)
     self.BN3d1 = nn.BatchNorm3d(num_features=8)  # num_features is the number of input channels
     self.BN3d2 = nn.BatchNorm3d(num_features=16)
     self.BN3d3 = nn.BatchNorm3d(num_features=32)
     self.BN3d4 = nn.BatchNorm3d(num_features=64)
     self.BN3d5 = nn.BatchNorm3d(num_features=128)
     self.pool1 = nn.AdaptiveMaxPool3d(
         (61, 73, 61))  # (61,73,61) is output size
     self.pool2 = nn.AdaptiveMaxPool3d((31, 37, 31))
     self.pool3 = nn.AdaptiveMaxPool3d((16, 19, 16))
     self.pool4 = nn.AdaptiveMaxPool3d((8, 10, 8))
     self.pool5 = nn.AdaptiveMaxPool3d((4, 5, 4))
     self.dropout = nn.Dropout(p=0.5)
     self.fc1 = nn.Linear(10240, 1300)  # followed by three fully-connected layers
     self.fc2 = nn.Linear(1300, 50)
     self.fc3 = nn.Linear(50, 2)
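A quick sanity check (not part of the original snippet): after pool5 the features are 128 channels at (4, 5, 4), so flattening yields 128 * 4 * 5 * 4 = 10240 values, matching nn.Linear(10240, 1300).

import torch

feat = torch.randn(1, 128, 4, 5, 4)  # shape produced by pool5
assert feat.flatten(1).shape[1] == 128 * 4 * 5 * 4 == 10240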
Example #5
    def __init__(self,
                 lfb_prefix_path,
                 dataset_mode='train',
                 use_half_precision=True,
                 temporal_pool_type='avg',
                 spatial_pool_type='max',
                 pretrained=None):
        super().__init__()
        rank, _ = get_dist_info()
        if rank == 0:
            if not osp.exists(lfb_prefix_path):
                print(f'lfb prefix path {lfb_prefix_path} does not exist. '
                      f'Creating the folder...')
                mmcv.mkdir_or_exist(lfb_prefix_path)
            print('\nInferring LFB...')

        assert temporal_pool_type in ['max', 'avg']
        assert spatial_pool_type in ['max', 'avg']
        self.lfb_prefix_path = lfb_prefix_path
        self.dataset_mode = dataset_mode
        self.use_half_precision = use_half_precision
        self.pretrained = pretrained

        # Pool by default
        if temporal_pool_type == 'avg':
            self.temporal_pool = nn.AdaptiveAvgPool3d((1, None, None))
        else:
            self.temporal_pool = nn.AdaptiveMaxPool3d((1, None, None))
        if spatial_pool_type == 'avg':
            self.spatial_pool = nn.AdaptiveAvgPool3d((None, 1, 1))
        else:
            self.spatial_pool = nn.AdaptiveMaxPool3d((None, 1, 1))

        self.all_features = []
        self.all_metadata = []
Example #6
    def forward(self, x, xray):
        # CT
        x = F.interpolate(x, (128, 128, 128))
        out = self.map1(x)
        out = F.relu(self.bn1(out))
        # out = F.relu(out)
        out = nn.AdaptiveMaxPool3d((128, 128, 128))(out)

        out = self.map2(out)
        out = F.relu(self.bn2(out))
        # out = F.relu(out)
        out = nn.AdaptiveMaxPool3d((64, 64, 64))(out)

        out = self.map3(out)
        out = F.relu(self.bn3(out))
        # out = F.relu(out)
        out = nn.AdaptiveMaxPool3d((32, 32, 32))(out)

        out = self.map4(out)
        out = F.relu(self.bn4(out))
        # out = F.relu(out)
        out = nn.AdaptiveMaxPool3d((32, 32, 32))(out)

        out = out.view((1, -1))
        out_ct = F.relu(self.fc_ct(out))

        # X-ray
        xray = F.interpolate(xray, (xray.shape[2] // 2, xray.shape[3] // 2))
        out = self.conv1(xray)
        out = F.relu(self.bn1_x(out))
        # out = F.relu(out)
        out = nn.AdaptiveMaxPool2d((128, 128))(out)

        out = self.conv2(out)
        out = F.relu(self.bn2_x(out))
        # out = F.relu(out)
        out = nn.AdaptiveMaxPool2d((64, 64))(out)

        out = self.conv3(out)
        out = F.relu(self.bn3_x(out))
        # out = F.relu(out)
        out = nn.AdaptiveMaxPool2d((32, 32))(out)

        out = self.conv4(out)
        out = F.relu(self.bn4_x(out))
        # out = F.relu(out)
        out = nn.AdaptiveMaxPool2d((32, 32))(out)

        out = out.view((1, -1))
        out_xray = F.relu(self.fc_x(out))

        out = torch.cat((out_ct, out_xray), dim=-1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = F.relu(self.fc3(out))
        out = self.fc_out(out)

        return out
Example #7
 def __init__(self, state_dim, action_dim, max_action):  # max_action added as a parameter; it was referenced below but never defined
     super(Critic, self).__init__()
     self.conv1 = nn.Conv3d(in_channels=1, out_channels=16, kernel_size=3)
     self.bn1 = nn.BatchNorm3d(16)
     self.conv2 = nn.Conv3d(in_channels=16, out_channels=action_dim, kernel_size=3)
     self.bn2 = nn.BatchNorm3d(action_dim)
     self.adaptiveMaxPool = nn.AdaptiveMaxPool3d(1)
     self.max_action = max_action
     ###
     self.conv1a = nn.Conv3d(in_channels=1, out_channels=16, kernel_size=3)
     self.bn1a = nn.BatchNorm3d(16)
     self.conv2a = nn.Conv3d(in_channels=16, out_channels=action_dim, kernel_size=3)
     self.bn2a = nn.BatchNorm3d(action_dim)
     self.adaptiveMaxPoola = nn.AdaptiveMaxPool3d(1)
Example #8
def resnet3d(num_classes, expansion=False, maxpool=False):
    """

    Args:
        num_classes (int):

    Returns:
        torch.nn.modules.module.Module

    """

    model = r2plus1d_18(pretrained=False, progress=True)
    num_features = model.fc.in_features
    if expansion:
        model.fc = nn.Sequential(
            OrderedDict([
                ('dense', nn.Linear(in_features=num_features,
                                    out_features=200)),
                ('norm', nn.BatchNorm1d(num_features=200)),
                ('relu', nn.ReLU()), ('dropout', nn.Dropout(p=0.25)),
                ('last', nn.Linear(in_features=200, out_features=num_classes))
            ]))
    else:
        model.fc = nn.Linear(num_features, num_classes, bias=True)
    if maxpool:
        model.avgpool = nn.AdaptiveMaxPool3d(output_size=(1, 1, 1))

    return model
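A hedged usage sketch for this factory (it assumes the snippet's missing imports are in place, i.e. torch.nn as nn, OrderedDict, and r2plus1d_18 from torchvision.models.video, and clips shaped (N, C, T, H, W)):

import torch

model = resnet3d(num_classes=10, maxpool=True)  # avgpool swapped for AdaptiveMaxPool3d
model.eval()
clip = torch.randn(2, 3, 16, 112, 112)
print(model(clip).shape)  # torch.Size([2, 10])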
Example #9
    def __init__(self, n_inputs, n_classes, init_type=None):
        super().__init__()

        net_args = model_types['unet-simple']
        self.mapping_network = ReducedUnet(**net_args,
                                           n_inputs=n_inputs,
                                           n_ouputs=n_inputs,
                                           return_feat_maps=True)

        n_filters_map = self.mapping_network.n_output_filters
        self.conv2d_1d = nn.Sequential(
            nn.Conv3d(n_filters_map,
                      64,
                      kernel_size=(1, 3, 3),
                      padding=(0, 1, 1)), nn.ReLU(inplace=True),
            nn.Conv3d(64, 64, kernel_size=(3, 1, 1), padding=(1, 0, 0)),
            nn.Conv3d(64, 64, kernel_size=(1, 3, 3), padding=(0, 1, 1)),
            nn.ReLU(inplace=True),
            nn.Conv3d(64, 32, kernel_size=(3, 1, 1), padding=(1, 0, 0)),
            nn.AdaptiveMaxPool3d((1, 1, None)))

        self.fc_clf = nn.Linear(32, n_classes)

        #initialize model
        if init_type is not None:
            for m in self.modules():
                init_weights(m, init_type=init_type)
Example #10
    def __init__(self, input_dim, output_dim):
        super(conv_hybrid, self).__init__()
        self.inp_dim = input_dim
        self.out_dim = output_dim

        incep_output_dim = 128
        self.inception1 = nn.Sequential(
            nn.Conv2d(input_dim, incep_output_dim, kernel_size=1),
            nn.MaxPool2d(3))
        self.inception2 = nn.Conv2d(input_dim, incep_output_dim, kernel_size=3)
        self.conv_cat = nn.Sequential(
            nn.ReLU(), nn.LocalResponseNorm(3, k=2),
            nn.Conv2d(incep_output_dim * 2, 128, kernel_size=1), nn.ReLU(),
            nn.LocalResponseNorm(3, k=2))
        self.residual = nn.Sequential(
            nn.Conv2d(128, 128, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=1),
        )
        self.activat = nn.ReLU()
        self.conv_alex = nn.Sequential(
            nn.ReLU(), nn.Conv2d(128, 128, kernel_size=1), nn.ReLU(),
            nn.Dropout(), nn.Conv2d(128, 128, kernel_size=1), nn.ReLU(),
            nn.Dropout(), nn.Conv2d(128, 128, kernel_size=1),
            nn.AdaptiveMaxPool3d((output_dim, None, None)))
Example #11
    def __init__(
        self,
        penet_params,
        freeze_penet=True,
        device=0,
    ):
        super(PE_MIL, self).__init__()
        self.penet = PENet(**penet_params)
        self.avg_pool = nn.AdaptiveAvgPool3d(1)
        self.max_pool = nn.AdaptiveMaxPool3d((None, 1, 1))
        self.mil_model = MIL_SoftmaxAttention(inplanes=768,
                                              midplanes=512,
                                              dropout=0.1)
        if freeze_penet:
            for param in self.penet.parameters():
                #print(param)
                param.requires_grad = False

        self.fine_tuning_param('out_conv')
        self.fine_tuning_param('asp_pool')
        self.fine_tuning_param('encoders.2')
        self.fine_tuning_param('encoders.1')
        self.fine_tuning_param('encoders.0')
        self.fine_tuning_param('in_conv')

        # freeze bn
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.GroupNorm):
                for p in m.parameters():
                    p.requires_grad = False
Example #12
    def __init__(self, num_classes, input_channel=3):
        super(C3D, self).__init__()

        self.feature = nn.Sequential(
            nn.Conv3d(input_channel,
                      64,
                      kernel_size=(3, 3, 3),
                      padding=(1, 1, 1)),
            nn.MaxPool3d(kernel_size=(1, 2, 2), stride=(1, 2, 2)),
            nn.Conv3d(64, 128, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
            nn.Conv3d(128, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.Conv3d(256, 256, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
            nn.Conv3d(256, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.MaxPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2)),
            nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.Conv3d(512, 512, kernel_size=(3, 3, 3), padding=(1, 1, 1)),
            nn.AdaptiveMaxPool3d(output_size=(1, 4, 4)))

        self.fc = nn.Sequential(nn.Linear(8192, 4096), nn.ReLU(inplace=True),
                                nn.Dropout3d(0.5), nn.Linear(4096, 4096),
                                nn.ReLU(inplace=True), nn.Dropout3d(0.5),
                                nn.Linear(4096, num_classes))

        self.__init_weight()
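A standalone shape check (not from the repo): nn.AdaptiveMaxPool3d(output_size=(1, 4, 4)) leaves 512 * 1 * 4 * 4 = 8192 features regardless of the conv-stage resolution, which matches nn.Linear(8192, 4096).

import torch
import torch.nn as nn

feat = torch.randn(2, 512, 2, 7, 7)  # an arbitrary final conv output
pooled = nn.AdaptiveMaxPool3d((1, 4, 4))(feat)
assert pooled.flatten(1).shape[1] == 8192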
Example #13
    def __init__(self,
                 block,
                 layers,
                 num_classes,
                 mode='ip',
                 add_landmarks=False):
        super().__init__()

        assert mode in ['ip', 'ir']
        self.mode = mode
        self.add_landmarks = add_landmarks

        self.in_channels = 64
        if add_landmarks:
            self.conv1 = nn.Conv3d(4,
                                   64,
                                   kernel_size=(3, 7, 7),
                                   stride=(1, 2, 2),
                                   padding=(1, 3, 3),
                                   bias=False)
        else:
            self.conv1 = nn.Conv3d(3,
                                   64,
                                   kernel_size=(3, 7, 7),
                                   stride=(1, 2, 2),
                                   padding=(1, 3, 3),
                                   bias=False)
        self.bn1 = nn.BatchNorm3d(64)
        self.gn1 = nn.GroupNorm(1, 64)
        self.relu = nn.ReLU()
        self.max_pool = nn.MaxPool3d(kernel_size=(1, 3, 3),
                                     stride=(1, 2, 2),
                                     padding=(0, 1, 1))

        self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
        #self.tat_layer = TATLayer(in_channels=256)
        self.nl_1 = NONLocalBlock3D(in_channels=256)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.nl_2 = NONLocalBlock3D(in_channels=512)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.nl_3 = NONLocalBlock3D(in_channels=1024)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        self.global_avg_pool = nn.AdaptiveAvgPool3d(1)  # the original paper uses avg pooling
        self.global_max_pool = nn.AdaptiveMaxPool3d(1)
        self.fc1 = nn.Linear(512 * block.expansion * 2, 9)
        #self.fc2 = nn.Linear(256, num_classes)
        self.dropout = nn.Dropout()
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()

        # initialize
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight,
                                        mode='fan_out',
                                        nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm3d) or isinstance(m, nn.GroupNorm):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #14
    def __init__(self):

        super(FCN, self).__init__()

        self.dropout = nn.Dropout2d(p=0.2)
        self.batch_norm1 = nn.BatchNorm2d(8)
        self.batch_norm2 = nn.BatchNorm2d(16)
        self.batch_norm3 = nn.BatchNorm2d(32)
        self.batch_norm4 = nn.BatchNorm2d(16)
        self.batch_norm5 = nn.BatchNorm2d(2)

        self.conv1 = nn.Conv2d(in_channels=1,
                               out_channels=8,
                               kernel_size=(3, 3))
        self.conv2 = nn.Conv2d(in_channels=8,
                               out_channels=16,
                               kernel_size=(3, 3))
        self.conv3 = nn.Conv2d(in_channels=16,
                               out_channels=32,
                               kernel_size=(3, 3))
        self.conv4 = nn.Conv2d(in_channels=32,
                               out_channels=16,
                               kernel_size=(1, 1))
        self.conv5 = nn.Conv2d(in_channels=16,
                               out_channels=2,
                               kernel_size=(1, 1))

        self.global_pool = nn.AdaptiveMaxPool3d((2, 1, 1))
        self.padding = 1
Example #15
    def __init__(self, input_channels, n_classes):
        super(SSNet, self).__init__()
        self.input_channels = input_channels
        self.n_classes = n_classes

        self.features_size = 8000  # 32 channels * 10 * 5 * 5 after conv3

        self.loss_func = nn.CrossEntropyLoss()
        self.train_accuracy = torchmetrics.Accuracy()
        self.val_accuracy = torchmetrics.Accuracy()

        self.conv1 = nn.Sequential(nn.Conv3d(1, 8, (7, 3, 3), stride=1),
                                   nn.LeakyReLU(), nn.MaxPool3d(2))

        self.conv2 = nn.Sequential(nn.Conv3d(8, 16, (5, 3, 3), stride=1),
                                   nn.LeakyReLU(), nn.MaxPool3d(2))

        self.conv3 = nn.Sequential(nn.Conv3d(16, 32, (3, 3, 3), stride=1),
                                   nn.LeakyReLU(),
                                   nn.AdaptiveMaxPool3d((10, 5, 5)))

        # self.conv4 = nn.Sequential(
        #                 nn.Conv2d(32, 64, (3, 3), stride=1),
        #                 nn.LeakyReLU())

        self.fc1 = nn.Sequential(nn.Linear(self.features_size, 256),
                                 nn.LeakyReLU(), nn.Dropout(0.5))

        self.fc2 = nn.Sequential(nn.Linear(256, 128), nn.LeakyReLU(),
                                 nn.Dropout(0.5),
                                 nn.Linear(128, self.n_classes))
Example #16
    def __init__(self,
                 in_channels,
                 kernels=3,
                 scale=1.0,
                 norm='none',
                 residual=True,
                 **kwargs):
        super(DynamicAttention, self).__init__()

        self.scale = scale
        self.residual = residual

        self.key = nn.Sequential(
            *conv_1xkxk_bn(in_channels,
                           in_channels,
                           kernels,
                           1,
                           groups=in_channels,
                           norm=norm), HSwish(),
            *conv_1x1x1_bn(in_channels, in_channels, norm=norm),
            nn.AdaptiveMaxPool3d((1, 1, 1)))
        self.value = nn.Sequential(
            *conv_1xkxk_bn(in_channels,
                           in_channels,
                           kernels,
                           1,
                           groups=in_channels,
                           norm=norm), HSwish(),
            *conv_1x1x1_bn(in_channels, in_channels, norm=norm))
Example #17
 def __init__(self):
     super(TumorClassifier, self).__init__()
     self.featureExtractor = nn.Sequential(
         nn.Conv3d(1, 8, 3, stride=1, padding=1),  #40
         nn.BatchNorm3d(8),
         nn.ReLU(),
         nn.Conv3d(8, 16, 3, stride=1, padding=1),
         nn.BatchNorm3d(16),
         nn.ReLU(),
         nn.Conv3d(16, 16, 3, stride=1, padding=1),
         nn.BatchNorm3d(16),
         nn.ReLU(),
         nn.MaxPool3d(2),  #20
         nn.Conv3d(16, 32, 3, stride=1, padding=1),
         nn.BatchNorm3d(32),
         nn.ReLU(),
         nn.Conv3d(32, 32, 3, stride=1, padding=1),
         nn.BatchNorm3d(32),
         nn.ReLU(),
         nn.MaxPool3d(2),  #10
         nn.Conv3d(32, 64, 3, stride=1, padding=1),
         nn.BatchNorm3d(64),
         nn.ReLU(),
         nn.MaxPool3d(2),  #5
         nn.Conv3d(64, 128, 3, stride=1, padding=1),
         nn.ReLU(),
         nn.Conv3d(128, 64, 1),
         nn.ReLU(),
         nn.Conv3d(64, 1, 1),
         nn.Sigmoid(),
         nn.AdaptiveMaxPool3d(1)
         #nn.AdaptiveMaxPool3d(1),
     )
     self.classifier = nn.Sequential(nn.Linear(128, 64), nn.ReLU(),
                                     nn.Linear(64, 1), nn.Sigmoid())
Example #18
 def __init__(self, in_planes, ratio=16):
     super(ChannelAttention3d, self).__init__()
     self.avg_pool = nn.AdaptiveAvgPool3d(1)
     self.max_pool = nn.AdaptiveMaxPool3d(1)
     self.sharedMLP = nn.Sequential(
         nn.Conv3d(in_planes, in_planes // ratio, 1, bias=False), nn.ReLU(),
         nn.Conv3d(in_planes // ratio, in_planes, 1, bias=False))
Example #19
    def __init__(self,
                 spatial_type: str = 'avg',
                 spatial_size: int = 7,
                 temporal_size: int = 1):
        super(SimpleSTModule, self).__init__()

        assert spatial_type in ['avg', 'max']
        self.spatial_type = spatial_type

        self.spatial_size = spatial_size
        if spatial_size != -1:
            self.spatial_size = (spatial_size, spatial_size)

        self.temporal_size = temporal_size

        assert not (self.spatial_size == -1) ^ (self.temporal_size == -1)

        if self.temporal_size == -1 and self.spatial_size == -1:
            self.pool_size = (1, 1, 1)
            if self.spatial_type == 'avg':
                self.pool_func = nn.AdaptiveAvgPool3d(self.pool_size)
            if self.spatial_type == 'max':
                self.pool_func = nn.AdaptiveMaxPool3d(self.pool_size)
        else:
            self.pool_size = (self.temporal_size, ) + self.spatial_size
            if self.spatial_type == 'avg':
                self.pool_func = nn.AvgPool3d(self.pool_size,
                                              stride=1,
                                              padding=0)
            if self.spatial_type == 'max':
                self.pool_func = nn.MaxPool3d(self.pool_size,
                                              stride=1,
                                              padding=0)
Example #20
    def forward(self, x):
        sz = x.size()
        if len(sz) == 3:
            if self.avg_pool is None:
                self.avg_pool = nn.AdaptiveAvgPool1d(1)
                self.max_pool = nn.AdaptiveMaxPool1d(1)
            # Take the input and apply average and max pooling
            avg_values = self.avg_pool(x)
            max_values = self.max_pool(x)
            out = (self.mlp(avg_values) + self.mlp(max_values)).view(sz[0], sz[1], 1)
        if len(sz) == 4:
            if self.avg_pool is None:
                self.avg_pool = nn.AdaptiveAvgPool2d(1)
                self.max_pool = nn.AdaptiveMaxPool2d(1)
            # Take the input and apply average and max pooling
            avg_values = self.avg_pool(x)
            max_values = self.max_pool(x)
            out = (self.mlp(avg_values) + self.mlp(max_values)).view(sz[0], sz[1], 1, 1)
        if len(sz) == 5:
            if self.avg_pool is None:
                self.avg_pool = nn.AdaptiveAvgPool3d(1)
                self.max_pool = nn.AdaptiveMaxPool3d(1)
            # Take the input and apply average and max pooling
            avg_values = self.avg_pool(x)
            max_values = self.max_pool(x)
            out = (self.mlp(avg_values) + self.mlp(max_values)).view(sz[0], sz[1], 1, 1, 1)

        scale = x * th.sigmoid(out)
        return scale
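A minimal standalone version of the 5D branch above (the mlp here is a hypothetical stand-in; the original module defines its own mlp elsewhere):

import torch
import torch.nn as nn

C = 16
mlp = nn.Sequential(nn.Flatten(), nn.Linear(C, C // 4), nn.ReLU(), nn.Linear(C // 4, C))

x = torch.randn(2, C, 4, 8, 8)                  # (N, C, D, H, W)
avg = nn.AdaptiveAvgPool3d(1)(x)                # (N, C, 1, 1, 1)
mx = nn.AdaptiveMaxPool3d(1)(x)
out = (mlp(avg) + mlp(mx)).view(2, C, 1, 1, 1)  # shared MLP on both descriptors
print((x * torch.sigmoid(out)).shape)           # torch.Size([2, 16, 4, 8, 8])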
Example #21
    def __init__(self,
                 num_classes,
                 in_channels,
                 loss_cls=dict(type='CrossEntropyLoss'),
                 spatial_type='avg',
                 dropout_ratio=0.5,
                 init_std=0.01,
                 fc1_bias=False):
        super().__init__(num_classes, in_channels, loss_cls)

        self.spatial_type = spatial_type
        self.dropout_ratio = dropout_ratio
        self.init_std = init_std
        if self.dropout_ratio != 0:
            self.dropout = nn.Dropout(p=self.dropout_ratio)
        else:
            self.dropout = None
        self.in_channels = in_channels
        self.mid_channels = 2048
        self.num_classes = num_classes
        self.fc1_bias = fc1_bias

        self.fc1 = nn.Linear(
            self.in_channels, self.mid_channels, bias=self.fc1_bias)
        self.fc2 = nn.Linear(self.mid_channels, self.num_classes)

        self.relu = nn.ReLU()

        self.pool = None
        if self.spatial_type == 'avg':
            self.pool = nn.AdaptiveAvgPool3d((1, 1, 1))
        elif self.spatial_type == 'max':
            self.pool = nn.AdaptiveMaxPool3d((1, 1, 1))
        else:
            raise NotImplementedError
Example #22
    def __init__(self, num_classes=1):
        super(AlexNet3D_Dropout, self).__init__()
        self.features = nn.Sequential(
            nn.Conv3d(1, 64, kernel_size=5, stride=2, padding=0),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool3d(kernel_size=3, stride=3),
            nn.Conv3d(64, 128, kernel_size=3, stride=1, padding=0),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.MaxPool3d(kernel_size=3, stride=3),
            nn.Conv3d(128, 192, kernel_size=3, padding=1),
            nn.BatchNorm3d(192),
            nn.ReLU(inplace=True),
            nn.Conv3d(192, 192, kernel_size=3, padding=1),
            nn.BatchNorm3d(192),
            nn.ReLU(inplace=True),
            nn.Conv3d(192, 128, kernel_size=3, padding=1),
            nn.BatchNorm3d(128),
            nn.ReLU(inplace=True),
            nn.AdaptiveMaxPool3d(1),
        )

        self.regressor = nn.Sequential(nn.Dropout(), nn.Linear(128, 64),
                                       nn.ReLU(inplace=True), nn.Dropout(),
                                       nn.Linear(64, num_classes))

        for m in self.modules():
            if isinstance(m, nn.Conv3d):  # the model only contains Conv3d; checking Conv2d would never fire
                n = m.kernel_size[0] * m.kernel_size[1] * m.kernel_size[2] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #23
                def __init__(self, in_channels=3, n_outputs=2):
                    super().__init__()
                    self.conv1 = nn.Conv3d(in_channels,
                                           64,
                                           3,
                                           stride=2,
                                           padding=0)
                    self.conv2 = nn.Conv3d(64, 64, 3, stride=1, padding=0)
                    self.bn1 = nn.BatchNorm3d(64)

                    self.conv3 = nn.Conv3d(64, 128, 3, stride=2, padding=0)
                    self.conv4 = nn.Conv3d(128, 128, 3, stride=1, padding=0)
                    self.bn2 = nn.BatchNorm3d(128)

                    self.conv5 = nn.Conv3d(128, 256, 3, stride=2, padding=0)
                    self.conv6 = nn.Conv3d(256,
                                           256, (1, 3, 3),
                                           stride=1,
                                           padding=0)
                    self.bn3 = nn.BatchNorm3d(256)

                    self.pool = nn.AdaptiveMaxPool3d((1, 16, 16))
                    self.fc1 = nn.Linear(in_features=65536, out_features=1024)
                    self.dropout1 = nn.Dropout(p=0.5, inplace=True)
                    self.fc2 = nn.Linear(1024, 64)
                    self.dropout2 = nn.Dropout(p=0.1, inplace=True)
                    self.fc3 = nn.Linear(64, n_outputs)
Example #24
    def __init__(self):
        super(AttentionModule, self).__init__()
        self.temporal_size = 4
        self.height = 8
        self.width = 8
        reduction_ratio = 16

        self.spatial_attention = nn.Sequential(
            nn.Conv3d(512,
                      1,
                      kernel_size=(1, self.height, self.width),
                      bias=True), nn.BatchNorm3d(1), nn.Sigmoid())
        self.temporal_attention = nn.Sequential(
            nn.Conv3d(512,
                      1,
                      kernel_size=(self.temporal_size, 1, 1),
                      bias=True), nn.BatchNorm3d(1), nn.Sigmoid())
        self.avg_pool = nn.AdaptiveAvgPool3d(1)  # pool to 1x1x1 so Flatten yields a 512-dim descriptor; the original had AdaptiveAvgPool3d(512), which upsamples to 512^3
        self.max_pool = nn.AdaptiveMaxPool3d(1)
        # self.channel_attention = nn.Sequential(
        #     Flatten(),
        #     nn.Linear(512, 512 // reduction_ratio),
        #     nn.ReLU(),
        #     nn.Linear(512 // reduction_ratio, 512)
        # )
        self.sigmoid = nn.Sigmoid()
        self.relu = nn.ReLU()
Example #25
    def __init__(self, depth=18):
        super(R2Plus1D, self).__init__()

        self.conv1 = nn.Sequential(
            nn.Conv3d(3,
                      64,
                      kernel_size=(3, 7, 7),
                      padding=(1, 3, 3),
                      stride=(1, 2, 2),
                      bias=False),
            nn.BatchNorm3d(64),
            nn.ReLU(inplace=True),
        )

        if depth == 10:
            self.conv2x = BasicR2P1DBlock(64, 64)
            self.conv3x = BasicR2P1DBlock(64, 128, stride=(2, 2, 2))
            self.conv4x = BasicR2P1DBlock(128, 256, stride=(2, 2, 2))
            self.conv5x = BasicR2P1DBlock(256, 512, stride=(2, 2, 2))
        elif depth == 18:
            self.conv2x = nn.Sequential(BasicR2P1DBlock(64, 64),
                                        BasicR2P1DBlock(64, 64))
            self.conv3x = nn.Sequential(
                BasicR2P1DBlock(64, 128, stride=(2, 2, 2)),
                BasicR2P1DBlock(128, 128))
            self.conv4x = nn.Sequential(
                BasicR2P1DBlock(128, 256, stride=(2, 2, 2)),
                BasicR2P1DBlock(256, 256))
            self.conv5x = nn.Sequential(
                BasicR2P1DBlock(256, 512, stride=(2, 2, 2)),
                BasicR2P1DBlock(512, 512))

        self.pool = nn.AdaptiveMaxPool3d((1, 1, 1))
        self.out_dim = 512
Example #26
 def __init__(
     self,
     spatial_type='avg',
     spatial_size=7,
     temporal_size=4,
     consensus_cfg=dict(type='avg', dim=1),
     dropout_ratio=0.5,
     in_channels=2048,
     num_classes=400,
     init_std=0.01,
     fcn_testing=False,
     extract_feat=False,
 ):
     super(I3DClsHead,
           self).__init__(spatial_size, dropout_ratio, in_channels,
                          num_classes, init_std, extract_feat)
     self.spatial_type = spatial_type
     self.consensus_type = consensus_cfg['type']
     self.temporal_size = temporal_size
     assert not (self.spatial_size == -1) ^ (self.temporal_size == -1)
     if self.temporal_size == -1 and self.spatial_size == -1:
         self.pool_size = (1, 1, 1)
         if self.spatial_type == 'avg':
             self.Logits = nn.AdaptiveAvgPool3d(self.pool_size)
         if self.spatial_type == 'max':
             self.Logits = nn.AdaptiveMaxPool3d(self.pool_size)
     else:
         self.pool_size = (self.temporal_size, ) + self.spatial_size
         if self.spatial_type == 'avg':
             self.Logits = nn.AvgPool3d(self.pool_size, stride=1, padding=0)
         if self.spatial_type == 'max':
             self.Logits = nn.MaxPool3d(self.pool_size, stride=1, padding=0)
     self.fc_cls = nn.Linear(self.in_channels, self.num_classes)
     self.fcn_testing = fcn_testing
     self.new_cls = None
Example #27
    def __init__(self):
        super(NNPoolingModule, self).__init__()
        self.input1d = torch.randn(1, 16, 50)
        self.module1d = nn.ModuleList([
            nn.MaxPool1d(3, stride=2),
            nn.AvgPool1d(3, stride=2),
            nn.LPPool1d(2, 3, stride=2),
            nn.AdaptiveMaxPool1d(3),
            nn.AdaptiveAvgPool1d(3),
        ])

        self.input2d = torch.randn(1, 16, 30, 10)
        self.module2d = nn.ModuleList([
            nn.MaxPool2d((3, 2), stride=(2, 1)),
            nn.AvgPool2d((3, 2), stride=(2, 1)),
            nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5)),
            nn.LPPool2d(2, 3, stride=(2, 1)),
            nn.AdaptiveMaxPool2d((5, 7)),
            nn.AdaptiveAvgPool2d((7)),
        ])

        self.input3d = torch.randn(1, 16, 20, 4, 4)
        self.module3d = nn.ModuleList([
            nn.MaxPool3d(2),
            nn.AvgPool3d(2),
            nn.FractionalMaxPool3d(2, output_ratio=(0.5, 0.5, 0.5)),
            nn.AdaptiveMaxPool3d((5, 7, 9)),
            nn.AdaptiveAvgPool3d((5, 7, 9)),
        ])
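A standalone note on the adaptive pools above: the target extents need not be smaller than the input. For the 20x4x4 input, (5, 7, 9) downsamples the first dimension and resamples the others up:

import torch
import torch.nn as nn

x = torch.randn(1, 16, 20, 4, 4)
print(nn.AdaptiveMaxPool3d((5, 7, 9))(x).shape)  # torch.Size([1, 16, 5, 7, 9])
print(nn.AdaptiveAvgPool3d((5, 7, 9))(x).shape)  # torch.Size([1, 16, 5, 7, 9])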
Example #28
    def __init__(self, n_inputs, n_classes, init_type=None, batch_norm=False):
        super().__init__()

        if batch_norm:
            net_args = model_types['unet-simple-bn']
        else:
            net_args = model_types['unet-simple']

        self.mapping_network = ReducedUnet(**net_args,
                                           n_inputs=n_inputs,
                                           n_ouputs=n_inputs,
                                           return_feat_maps=True)

        n_filters_map = self.mapping_network.n_output_filters

        layers = get_conv2dplus1d(n_filters_map, 64, 64, batch_norm)
        if batch_norm:
            layers += [nn.BatchNorm3d(64)]
        layers += [nn.ReLU(inplace=True)] + get_conv2dplus1d(
            64, 32, 64, batch_norm) + [nn.AdaptiveMaxPool3d((1, 1, None))]

        self.conv2d_1d = nn.Sequential(*layers)

        self.fc_clf = nn.Linear(32, n_classes)

        #initialize model
        if init_type is not None:
            for m in self.modules():
                init_weights(m, init_type=init_type)
Example #29
	def __init__(self, channel, depth):
		super(CDcor, self).__init__()
		self.channel = channel
		self.depth = depth

		self.gp = nn.Sequential(
			nn.AdaptiveMaxPool3d((1, 1, 1)),
			nn.ReLU(inplace=True)
		)

		self.fc_channel = nn.Sequential(
			nn.Linear(self.channel, self.channel // 16, bias=False),
			nn.ReLU(inplace=True),
			nn.Linear(self.channel // 16, self.channel, bias=False),
			nn.Sigmoid()
		)

		self.fc_depth = nn.Sequential(
			nn.Linear(self.depth, self.depth // 16, bias=False),
			nn.ReLU(inplace=True),
			nn.Linear(self.depth // 16, self.depth, bias=False),
			nn.Sigmoid()
		)

		self.correlation = nn.Sequential(
			nn.Linear(self.channel, self.channel // 16, bias=False),
			nn.ReLU(inplace=True),
			nn.Linear(self.channel // 16, self.depth, bias=False),
			nn.Sigmoid()
		)
Example #30
    def __init__(self, input_dim, input_w, input_h, output_dim):
        super(fconv3d_net, self).__init__()
        self.inp_w = input_w
        self.inp_h = input_h
        self.out_dim = output_dim
        kernel_size = 7
        kernel_num = 20

        self.conv = nn.Sequential(
            nn.Conv3d(1, kernel_num, kernel_size=kernel_size, stride=1),
            nn.ReLU(),
            nn.LocalResponseNorm(3, k=2),
            nn.Conv3d(kernel_num,
                      kernel_num,
                      kernel_size=kernel_size,
                      stride=1),
            nn.ReLU(),
            nn.LocalResponseNorm(3, k=2),
            nn.ConvTranspose3d(kernel_num,
                               kernel_num,
                               kernel_size=kernel_size,
                               stride=1),
            nn.ReLU(),
            nn.Dropout(),
            nn.ConvTranspose3d(kernel_num,
                               1,
                               kernel_size=kernel_size,
                               stride=1),
            nn.ReLU(),
            nn.Dropout(),
            nn.Conv3d(1, 128, kernel_size=1),
        )
        self.out_layer = nn.AdaptiveMaxPool3d((output_dim, None, None))