Code Example #1
    def __init__(self, device, size, getRawData=False, mode='udacity'):
        super(Challenge, self).__init__()
        if mode == 'udacity':  # fully-connected input sizes depend on the dataset mode
            self.fc1 = Linear(8295, 128)
            self.fc2 = Linear(1938, 128)
            self.fc3 = Linear(408, 128)
            self.fc4 = Linear(4480, 128)
            self.fc5 = Linear(4480, 1024)
        else:
            self.fc1 = Linear(6195, 128)
            self.fc2 = Linear(1428, 128)
            self.fc3 = Linear(288, 128)
            self.fc4 = Linear(2560, 128)
            self.fc5 = Linear(2560, 1024)
        self.conv1 = Conv3d(size,
                            64,
                            kernel_size=(3, 12, 12),
                            stride=(1, 6, 6))
        self.conv2 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv3 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv4 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))

        self.fc6 = Linear(1024, 512)
        self.fc7 = Linear(512, 256)
        self.fc8 = Linear(256, 128)
        self.fc9 = Linear(258, 1)
        self.lstm1 = LSTM(130, 128, 32)

        self.h1 = torch.zeros(32, 1, 128).to(device)
        self.c1 = torch.zeros(32, 1, 128).to(device)
        self.drop = Dropout3d(.25)
        self.elu = ELU()
        self.relu = ReLU()
        self.laynorm = GroupNorm(1, 128)
        self.getRawData = getRawData
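
conv1 in both branches is a Conv3d, which expects a 5-D input of shape (batch, channels, frames, height, width). A minimal shape check, assuming 3 input channels and an illustrative 5x120x320 clip (neither value comes from the snippet):

import torch
from torch.nn import Conv3d

conv1 = Conv3d(3, 64, kernel_size=(3, 12, 12), stride=(1, 6, 6))  # 3 stands in for `size`
clip = torch.randn(1, 3, 5, 120, 320)    # (batch, channels, frames, height, width)
print(conv1(clip).shape)                 # torch.Size([1, 64, 3, 19, 52])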
Code Example #2
    def __init__(self, device, size, outNum, batch=None):
        super(Challenge, self).__init__()
        self.fc1 = Linear(8295, 128)
        self.fc2 = Linear(1938, 128)
        self.fc3 = Linear(408, 128)
        self.fc4 = Linear(4480, 128)
        self.fc5 = Linear(4480, 1024)

        self.conv1 = Conv3d(size,
                            64,
                            kernel_size=(3, 12, 12),
                            stride=(1, 6, 6))
        self.conv2 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv3 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv4 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))

        self.fc6 = Linear(1024, 512)
        self.fc7 = Linear(512, 256)
        self.fc8 = Linear(256, 128)
        self.fc9 = Linear(258, outNum)
        self.lstm1 = LSTM(130, 128, 32)

        self.h1 = (torch.rand((32, 1, 128)) / 64).to(device)
        self.c1 = (torch.rand((32, 1, 128)) / 64).to(device)
        self.drop = Dropout3d(.25)
        self.elu = ELU()
        self.relu = ReLU()
        self.laynorm = GroupNorm(1, 128)
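
The LSTM in both variants is constructed with positional arguments, LSTM(130, 128, 32), i.e. input_size=130, hidden_size=128, num_layers=32. That is why h1 and c1 have shape (32, 1, 128): with the default batch_first=False, PyTorch expects initial states of shape (num_layers, batch, hidden_size). A minimal sketch (the sequence length 10 is an arbitrary assumption):

import torch
from torch.nn import LSTM

lstm = LSTM(130, 128, 32)                # input_size, hidden_size, num_layers
h0 = torch.rand(32, 1, 128) / 64         # (num_layers, batch, hidden_size)
c0 = torch.rand(32, 1, 128) / 64
seq = torch.randn(10, 1, 130)            # (seq_len, batch, input_size)
out, (hn, cn) = lstm(seq, (h0, c0))
print(out.shape)                         # torch.Size([10, 1, 128])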
Code Example #3
    def forward(self, x):
        # Encoder part

        x1 = self.conv_blk1(x)

        x_low1 = self.pool1(x1)
        x2 = self.conv_blk2(x_low1)

        x_low2 = self.pool2(x2)
        x3 = self.conv_blk3(x_low2)

        x_low3 = self.pool3(x3)
        x4 = self.conv_blk4(x_low3)

        x_low4 = self.pool4(x4)
        base = self.conv_blk5(x_low4)

        # Decoder part

        d4 = torch.cat([self.deconv_blk4(base), x4], dim=1)
        d_high4 = self.dec_conv_blk4(d4)

        d3 = torch.cat([self.deconv_blk3(d_high4), x3], dim=1)
        d_high3 = self.dec_conv_blk3(d3)
        d_high3 = Dropout3d(p=0.5)(d_high3)

        d2 = torch.cat([self.deconv_blk2(d_high3), x2], dim=1)
        d_high2 = self.dec_conv_blk2(d2)
        d_high2 = Dropout3d(p=0.5)(d_high2)

        d1 = torch.cat([self.deconv_blk1(d_high2), x1], dim=1)
        d_high1 = self.dec_conv_blk1(d1)

        seg = self.sigmoid(self.one_conv(d_high1))

        return seg
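
Each decoder step upsamples the deeper feature map and concatenates it with the encoder feature map of the same resolution along the channel axis (dim=1), so the dec_conv blocks receive the summed channel count of both inputs. A minimal sketch with made-up sizes:

import torch

upsampled = torch.randn(1, 64, 16, 16, 16)    # output of a deconv block (illustrative shape)
skip = torch.randn(1, 64, 16, 16, 16)         # encoder feature map at the same resolution
merged = torch.cat([upsampled, skip], dim=1)  # spatial dims must match; channels add up
print(merged.shape)                           # torch.Size([1, 128, 16, 16, 16])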
Code Example #4
    def __init__(self,
                 device,
                 size,
                 getRawData=False,
                 batch=1,
                 mode='udacity'):
        super(TSNENet, self).__init__()
        self.fc1 = Linear(8295, 128)  # 8374
        self.fc2 = Linear(475, 128)
        self.fc3 = Linear(88, 128)
        self.fc4 = Linear(512, 128)
        self.fc5 = Linear(512, 1024)

        self.conv1 = Conv3d(size,
                            64,
                            kernel_size=(3, 12, 12),
                            stride=(1, 6, 6))  # , padding=1)
        self.conv2 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv3 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))
        self.conv4 = Conv2d(64, 64, kernel_size=(5, 5), stride=(2, 2))

        self.fc6 = Linear(1024, 512)
        self.fc7 = Linear(512, 256)
        self.fc8 = Linear(256, 128)
        self.fc9 = Linear(258, 128)
        self.fc10 = Linear(128, 15)
        self.lstm1 = LSTM(130, 128, 32)

        self.h1 = (torch.rand((32, 1, 128)) / 64).to(device)
        self.c1 = (torch.rand((32, 1, 128)) / 64).to(device)
        self.drop = Dropout3d(.05)
        self.elu = ELU()
        self.relu = ReLU()
        self.laynorm = GroupNorm(1, 128)

        self.bnorm1 = BatchNorm3d(64)
        self.bnorm2 = BatchNorm2d(64)
        self.bnorm3 = BatchNorm2d(64)
        self.bnorm4 = BatchNorm2d(64)

        self.pool1 = MaxPool2d(2)
        self.pool2 = MaxPool2d(2)

        self.getRawData = getRawData
        self.batch = batch
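
These models name their GroupNorm(1, 128) `laynorm`: with a single group, GroupNorm normalizes each sample over all 128 channels (and any trailing dimensions), which behaves like layer normalization with a per-channel affine. A quick check:

import torch
from torch.nn import GroupNorm

gn = GroupNorm(1, 128)                   # one group over all 128 channels
feat = torch.randn(4, 128) * 3 + 5       # per-sample mean/std far from 0/1
out = gn(feat)
print(out.mean(dim=1))                   # ~0 for every sample
print(out.std(dim=1, unbiased=False))    # ~1 for every sample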
Code Example #5
    def __init__(self,
                 device,
                 size,
                 getRawData=False,
                 batch=1,
                 mode='udacity'):
        super(TSNENet, self).__init__()
        self.fc1 = Linear(118, 128)
        self.fc2 = Linear(117, 128)
        self.fc3 = Linear(116, 128)
        self.fc4 = Linear(116, 128)
        self.fc5 = Linear(1856, 1024)

        self.conv1 = Conv1d(size, 32, kernel_size=3, stride=1)
        self.conv2 = Conv1d(32, 32, kernel_size=2, stride=1)
        self.conv3 = Conv1d(32, 32, kernel_size=2, stride=1)
        self.conv4 = Conv1d(32, 16, kernel_size=1, stride=1)

        self.fc6 = Linear(1024, 512)
        self.fc7 = Linear(512, 256)
        self.fc8 = Linear(256, 128)
        self.fc9 = Linear(256, 128)
        self.fc10 = Linear(128, 10)
        self.lstm1 = LSTM(128, 128, 16)

        self.h1 = (torch.rand((16, 1, 128)) / 64).to(device)
        self.c1 = (torch.rand((16, 1, 128)) / 64).to(device)
        self.drop = Dropout3d(.25)
        self.elu = ELU()
        self.relu = ReLU()
        self.laynorm = GroupNorm(1, 128)

        self.bnorm1 = BatchNorm1d(32)
        self.bnorm2 = BatchNorm1d(32)
        self.bnorm4 = BatchNorm1d(16)

        self.getRawData = getRawData
        self.batch = batch
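
This variant swaps the Conv3d/Conv2d stack for Conv1d layers. With stride 1 and no padding, each convolution shortens the sequence by kernel_size - 1, so the fc1..fc4 input sizes (118, 117, 116, 116) line up with the lengths an input sequence of 120 samples would have after conv1..conv4, and 16 channels x 116 steps = 1856 matches fc5. The length 120 and the channel count 8 below are inferred assumptions, not stated in the snippet:

import torch
from torch.nn import Conv1d

x = torch.randn(1, 8, 120)                       # (batch, channels, length)
x = Conv1d(8, 32, kernel_size=3, stride=1)(x)    # length 118
x = Conv1d(32, 32, kernel_size=2, stride=1)(x)   # length 117
x = Conv1d(32, 32, kernel_size=2, stride=1)(x)   # length 116
x = Conv1d(32, 16, kernel_size=1, stride=1)(x)   # length 116
print(x.shape, x.flatten(1).shape)               # torch.Size([1, 16, 116]) torch.Size([1, 1856])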
Code Example #6
File: googlenet.py  Project: redsphinx/3tconv
    def __init__(self, pv):
        super(Googlenet3TConv_explicit, self).__init__()

        self.conv1 = ConvTTN3d(in_channels=3, out_channels=64, kernel_size=7, padding=3, stride=2, project_variable=pv, bias=False)
        self.bn1 = BatchNorm3d(64)
        self.maxpool1 = MaxPool3d(kernel_size=(1, 3, 3), padding=0, stride=(1, 2, 2))
        self.conv2 = Conv3d(in_channels=64, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn2 = BatchNorm3d(64)
        self.conv3 = ConvTTN3d(in_channels=64, out_channels=192, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn3 = BatchNorm3d(192)
        self.maxpool2 = MaxPool3d(kernel_size=(1, 3, 3), padding=0, stride=(1, 2, 2))

        # inception 3a
        self.conv4 = Conv3d(in_channels=192, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn4 = BatchNorm3d(64)
        self.conv5 = Conv3d(in_channels=192, out_channels=96, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn5 = BatchNorm3d(96)
        self.conv6 = ConvTTN3d(in_channels=96, out_channels=128, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn6 = BatchNorm3d(128)
        self.conv7 = Conv3d(in_channels=192, out_channels=16, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn7 = BatchNorm3d(16)
        self.conv8 = ConvTTN3d(in_channels=16, out_channels=32, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn8 = BatchNorm3d(32)
        self.maxpool3 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv9 = Conv3d(in_channels=192, out_channels=32, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn9 = BatchNorm3d(32)

        # inception 3b
        self.conv10 = Conv3d(in_channels=256, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn10 = BatchNorm3d(128)
        self.conv11 = Conv3d(in_channels=256, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn11 = BatchNorm3d(128)
        self.conv12 = ConvTTN3d(in_channels=128, out_channels=192, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn12 = BatchNorm3d(192)
        self.conv13 = Conv3d(in_channels=256, out_channels=32, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn13 = BatchNorm3d(32)
        self.conv14 = ConvTTN3d(in_channels=32, out_channels=96, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn14 = BatchNorm3d(96)
        self.maxpool4 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv15 = Conv3d(in_channels=256, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn15 = BatchNorm3d(64)

        self.maxpool5 = MaxPool3d(kernel_size=3, padding=0, stride=2)

        # inception 4a
        self.conv16 = Conv3d(in_channels=480, out_channels=192, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn16 = BatchNorm3d(192)
        self.conv17 = Conv3d(in_channels=480, out_channels=96, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn17 = BatchNorm3d(96)
        self.conv18 = ConvTTN3d(in_channels=96, out_channels=208, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn18 = BatchNorm3d(208)
        self.conv19 = Conv3d(in_channels=480, out_channels=16, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn19 = BatchNorm3d(16)
        self.conv20 = ConvTTN3d(in_channels=16, out_channels=48, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn20 = BatchNorm3d(48)
        self.maxpool6 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv21 = Conv3d(in_channels=480, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn21 = BatchNorm3d(64)

        # inception 4b
        self.conv22 = Conv3d(in_channels=512, out_channels=160, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn22 = BatchNorm3d(160)
        self.conv23 = Conv3d(in_channels=512, out_channels=112, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn23 = BatchNorm3d(112)
        self.conv24 = ConvTTN3d(in_channels=112, out_channels=224, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn24 = BatchNorm3d(224)
        self.conv25 = Conv3d(in_channels=512, out_channels=24, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn25 = BatchNorm3d(24)
        self.conv26 = ConvTTN3d(in_channels=24, out_channels=64, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn26 = BatchNorm3d(64)
        self.maxpool7 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv27 = Conv3d(in_channels=512, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn27 = BatchNorm3d(64)

        self.avgpool1 = AvgPool3d(kernel_size=5, padding=0, stride=3)
        self.conv28 = Conv3d(in_channels=512, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn28 = BatchNorm3d(128)
        # self.fc1 = Linear(in_features=2304, out_features=1024)
        self.fc1 = Linear(in_features=768, out_features=1024)  # 768
        self.dropout1 = Dropout3d(p=0.7)
        self.fc2 = Linear(in_features=1024, out_features=pv.label_size)

        # inception 4c
        self.conv29 = Conv3d(in_channels=512, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn29 = BatchNorm3d(128)
        self.conv30 = Conv3d(in_channels=512, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn30 = BatchNorm3d(128)
        self.conv31 = ConvTTN3d(in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn31 = BatchNorm3d(256)
        self.conv32 = Conv3d(in_channels=512, out_channels=24, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn32 = BatchNorm3d(24)
        self.conv33 = ConvTTN3d(in_channels=24, out_channels=64, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn33 = BatchNorm3d(64)
        self.maxpool8 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv34 = Conv3d(in_channels=512, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn34 = BatchNorm3d(64)

        # inception 4d
        self.conv35 = Conv3d(in_channels=512, out_channels=112, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn35 = BatchNorm3d(112)
        self.conv36 = Conv3d(in_channels=512, out_channels=144, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn36 = BatchNorm3d(144)
        self.conv37 = ConvTTN3d(in_channels=144, out_channels=288, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn37 = BatchNorm3d(288)
        self.conv38 = Conv3d(in_channels=512, out_channels=32, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn38 = BatchNorm3d(32)
        self.conv39 = ConvTTN3d(in_channels=32, out_channels=64, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn39 = BatchNorm3d(64)
        self.maxpool9 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv40 = Conv3d(in_channels=512, out_channels=64, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn40 = BatchNorm3d(64)

        # inception 4e
        self.conv41 = Conv3d(in_channels=528, out_channels=256, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn41 = BatchNorm3d(256)
        self.conv42 = Conv3d(in_channels=528, out_channels=160, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn42 = BatchNorm3d(160)
        self.conv43 = ConvTTN3d(in_channels=160, out_channels=320, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn43 = BatchNorm3d(320)
        self.conv44 = Conv3d(in_channels=528, out_channels=32, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn44 = BatchNorm3d(32)
        self.conv45 = ConvTTN3d(in_channels=32, out_channels=128, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn45 = BatchNorm3d(128)
        self.maxpool10 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv46 = Conv3d(in_channels=528, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn46 = BatchNorm3d(128)

        self.avgpool2 = AvgPool3d(kernel_size=5, padding=0, stride=3)
        self.conv47 = Conv3d(in_channels=528, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn47 = BatchNorm3d(128)
        # self.fc3 = Linear(in_features=2304, out_features=1024)
        self.fc3 = Linear(in_features=768, out_features=1024)
        self.dropout2 = Dropout3d(p=0.7)
        self.fc4 = Linear(in_features=1024, out_features=pv.label_size)

        self.maxpool11 = MaxPool3d(kernel_size=3, padding=0, stride=2)

        # inception 5a
        self.conv48 = Conv3d(in_channels=832, out_channels=256, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn48 = BatchNorm3d(256)
        self.conv49 = Conv3d(in_channels=832, out_channels=160, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn49 = BatchNorm3d(160)
        self.conv50 = ConvTTN3d(in_channels=160, out_channels=320, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn50 = BatchNorm3d(320)
        self.conv51 = Conv3d(in_channels=832, out_channels=32, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn51 = BatchNorm3d(32)
        self.conv52 = ConvTTN3d(in_channels=32, out_channels=128, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn52 = BatchNorm3d(128)
        self.maxpool12 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv53 = Conv3d(in_channels=832, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn53 = BatchNorm3d(128)

        # inception 5b
        self.conv54 = Conv3d(in_channels=832, out_channels=384, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn54 = BatchNorm3d(384)
        self.conv55 = Conv3d(in_channels=832, out_channels=192, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn55 = BatchNorm3d(192)
        self.conv56 = ConvTTN3d(in_channels=192, out_channels=384, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn56 = BatchNorm3d(384)
        self.conv57 = Conv3d(in_channels=832, out_channels=48, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn57 = BatchNorm3d(48)
        self.conv58 = ConvTTN3d(in_channels=48, out_channels=128, kernel_size=3, padding=1, stride=1, project_variable=pv, bias=False)
        self.bn58 = BatchNorm3d(128)
        self.maxpool13 = MaxPool3d(kernel_size=3, padding=1, stride=1)
        self.conv59 = Conv3d(in_channels=832, out_channels=128, kernel_size=1, padding=0, stride=1, bias=False)
        self.bn59 = BatchNorm3d(128)

        self.avgpool3 = AdaptiveAvgPool3d(1)
        self.dropout3 = Dropout3d(p=0.4)
        self.fc5 = Linear(in_features=1024, out_features=pv.label_size)
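
Each inception block above follows the same four-branch pattern; for instance inception 3a's conv5/conv6 pair is a 1x1 bottleneck from 192 down to 96 channels followed by a 3x3 convolution up to 128. A minimal sketch of that one branch, with a plain Conv3d standing in for the project-specific ConvTTN3d and an assumed BatchNorm+ReLU ordering (the forward pass is not shown in the snippet):

import torch
from torch.nn import Conv3d, BatchNorm3d, ReLU, Sequential

branch = Sequential(
    Conv3d(192, 96, kernel_size=1, padding=0, stride=1, bias=False),   # 1x1 bottleneck (conv5)
    BatchNorm3d(96), ReLU(),
    Conv3d(96, 128, kernel_size=3, padding=1, stride=1, bias=False),   # stand-in for ConvTTN3d (conv6)
    BatchNorm3d(128), ReLU())
x = torch.randn(1, 192, 8, 28, 28)       # illustrative input to inception 3a
print(branch(x).shape)                   # torch.Size([1, 128, 8, 28, 28])

As in the original GoogLeNet, the four branch outputs would be concatenated along the channel dimension, which is consistent with inception 3b's layers expecting 256 input channels (64 + 128 + 32 + 32 from conv4, conv6, conv8, conv9).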
Code Example #7
    def __init__(self, dropout=0, drop_conv=0, n_classes=1, in_size=[182, 218, 182],
                 conv_block=[15, 25, 50, 50], linear_block=[50, 40],
                 output_fnc=None, batch_norm=True):

        super(ConvN_FC3, self).__init__()

        self.encoding_blocks = nn.ModuleList()
        for nb_layer in conv_block:
            if len(self.encoding_blocks) == 0:
                nb_in = 1
                out_size = np.ceil((np.array(in_size) - 2) / 2)
            else:
                out_size = np.ceil((out_size - 2) / 2)

            # Build one encoder block: Conv3d -> (BatchNorm3d) -> ReLU -> (Dropout3d) -> pool
            layers = [nn.Conv3d(nb_in, nb_layer, 3)]
            if batch_norm:
                layers.append(nn.BatchNorm3d(nb_layer))
            layers.append(nn.ReLU())
            if drop_conv:
                layers.append(Dropout3d(p=drop_conv))
            layers.append(PadMaxPool3d(2, 2))
            one_conv = nn.Sequential(*layers)
            self.encoding_blocks.append(one_conv)
            nb_in = nb_layer
        self.encoding_blocks = nn.Sequential(*self.encoding_blocks)

        print('last layer out size {} * {}'.format(out_size, nb_layer))
        out_flatten = np.prod(out_size) * nb_layer
        print('size flatten {}'.format(out_flatten))

        in_size = out_flatten
        self.classifier = nn.ModuleList()
        self.classifier.append(Flatten())

        for nb_out in linear_block:
            one_lin = nn.Sequential(
                nn.Dropout(p=dropout),
                nn.Linear(int(in_size), nb_out),
                nn.ReLU())
            in_size = nb_out
            self.classifier.append(one_lin)

        self.classifier.append(nn.Linear(in_size, n_classes))
        self.classifier = nn.Sequential(*self.classifier)

        self.output_fnc = None
        if output_fnc is not None:
            if output_fnc == 'tanh':
                self.output_fnc = torch.tanh
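
The encoder loop tracks the spatial size as ceil((size - 2) / 2) per block: the kernel-3 convolution with no padding removes 2 from each dimension, and PadMaxPool3d(2, 2) is assumed to pad odd edges so the 2x2 pooling halves with ceiling. With the default arguments the flattened feature count works out as follows:

import numpy as np

out_size = np.array([182, 218, 182], dtype=float)
for nb_layer in [15, 25, 50, 50]:        # default conv_block
    out_size = np.ceil((out_size - 2) / 2)
print(out_size)                          # [10. 12. 10.]
print(int(np.prod(out_size) * 50))       # 60000 features entering the classifier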