class SH_UNet(nn.Module):
    def __init__(self, path_to_shape_net_weights='', n_classes=2):
        """Segmentation network (EESPNet_Seg) paired with a shape-prior net.

        Args:
            path_to_shape_net_weights: optional path to a pretrained
                ShapeUNet state dict; when empty, no weights are loaded.
            n_classes: number of segmentation classes. Default of 2
                preserves the previous hard-coded behavior.
        """
        super(SH_UNet, self).__init__()

        # BUG FIX: ``n_classes`` was accepted but ignored — both sub-nets
        # were hard-coded to 2 classes. Route the parameter through so the
        # module generalizes; callers using the default are unaffected.
        self.unet = EESPNet_Seg(classes=n_classes, s=2)
        self.shapeUNet = ShapeUNet((n_classes, 1024, 1024))
        self.softmax = nn.Softmax(dim=1)
        if path_to_shape_net_weights:
            self.shapeUNet.load_state_dict(
                torch.load(path_to_shape_net_weights))
    def forward(self, x, only_encode=False):
        """Run the U-Net, then refine its class probabilities with ShapeUNet.

        With ``only_encode=True`` the input is fed straight to the shape
        network and only its encoding is returned.
        """
        if only_encode:
            return self.shapeUNet(x)[1]

        # The segmentation net yields an extra auxiliary output in training
        # mode; evaluation mode returns a single tensor.
        if self.training:
            unet_out, unet_aux = self.unet(x)
        else:
            unet_out = self.unet(x)

        # The shape network consumes class probabilities, not raw logits
        # (a .detach() here was considered but deliberately not applied).
        probabilities = self.softmax(unet_out)
        shape_final, shape_encoded = self.shapeUNet(probabilities)

        if self.training:
            return unet_out, unet_aux, shape_encoded, shape_final
        return unet_out, shape_encoded, shape_final
    # NOTE(review): this is a second ``__init__`` for the same class.
    # Being defined later in the class body, it silently overrides the
    # earlier one (swapping the 1024x1024 ShapeUNet for a 512x512 one).
    # This looks like a copy/paste artifact from a concatenated example —
    # confirm which variant is intended and delete the other.
    def __init__(self, path_to_shape_net_weights='', n_classes=2):
        # ``n_classes`` is accepted but unused: both sub-nets are
        # hard-coded to 2 classes below.
        super(SH_UNet, self).__init__()

        self.unet = EESPNet_Seg(classes=2, s=2)
        self.shapeUNet = ShapeUNet((2, 512, 512))
        self.softmax = nn.Softmax(dim=1)
        # Optionally warm-start the shape network from a saved state dict.
        if path_to_shape_net_weights:
            self.shapeUNet.load_state_dict(torch.load(path_to_shape_net_weights))
class SH_UNet(nn.Module):
    """Plain U-Net whose softmaxed prediction is refined by a ShapeUNet."""

    def __init__(self, path_to_shape_net_weights='', n_classes=2):
        super(SH_UNet, self).__init__()

        # 3-channel 544x544 input images; 4-channel shape-net input.
        self.unet = UNet((3, 544, 544))
        self.shapeUNet = ShapeUNet((4, 544, 544))
        self.softmax = nn.Softmax(dim=1)
        # Optionally warm-start the shape network from pretrained weights.
        if path_to_shape_net_weights:
            state = torch.load(path_to_shape_net_weights)
            self.shapeUNet.load_state_dict(state)

    def forward(self, x, only_encode=False):
        """Return (unet prediction, shape encoding, shape prediction).

        When ``only_encode`` is set, ``x`` is passed directly to the shape
        network and only its encoding is returned.
        """
        if only_encode:
            return self.shapeUNet(x)[1]

        raw_prediction = self.unet(x)
        # Shape network consumes class probabilities, not logits
        # (a .detach() here was considered but deliberately not applied).
        probabilities = self.softmax(raw_prediction)
        refined, encoded = self.shapeUNet(probabilities)

        return raw_prediction, encoded, refined
# Example #4
# ---- Data loaders -------------------------------------------------------
partition = 'train'
shape_train = ShapeData(ROOT_DIR, partition)
train_loader = torch.utils.data.DataLoader(
    shape_train, batch_size=10, shuffle=True)

partition = 'val'
shape_val = ShapeData(ROOT_DIR, partition)
val_loader = torch.utils.data.DataLoader(
    shape_val, batch_size=10, shuffle=False)

# ---- Model --------------------------------------------------------------
model = ShapeUNet((1, 200, 200))
model.to(device)

# ---- Optimizer and schedule ---------------------------------------------
lr = 1e-4
optimizer = Adam(model.parameters(), lr=lr)
NUM_OF_EPOCHS = 50

# Train and checkpoint into ``weights/``.
train(model, train_loader, val_loader, optimizer, NUM_OF_EPOCHS, 'weights/')

# PATH_TO_SAVE = 'predictions/'

# preds = []
# masks = []
# imgs = []
# Example #5
# ---- Data loaders -------------------------------------------------------
partition = 'val'
shape_val = ShapeData(ROOT_DIR, partition)
val_loader = torch.utils.data.DataLoader(
    shape_val, batch_size=1, shuffle=False)

partition = 'train'
unet_train = ShapeData(ROOT_DIR, partition)
unet_loader = torch.utils.data.DataLoader(
    unet_train, batch_size=1, shuffle=True)

# ---- Models -------------------------------------------------------------
model = ShapeUNet((4, 544, 544))
unet = UNet((3, 544, 544))
model.to(device)
unet.to(device)

mask_values = (0, 1, 2, 3)
# Colors are in BGR order (OpenCV convention), not RGB.
real_colors = ((0, 0, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255))

# ---- Optimizers ---------------------------------------------------------
lr = 1e-4
optimizer = Adam(model.parameters(), lr=lr)
NUM_OF_EPOCHS = 24

lr1 = 1e-4
unet_optim = Adam(unet.parameters(), lr=lr1)
    # NOTE(review): truncated fragment — this ``__init__`` appears without
    # its enclosing class and the body stops after two assignments (no
    # softmax or weight loading as in the other SH_UNet variants).  Also
    # ``path_to_shape_net_weights`` and ``n_classes`` are never used here.
    # Likely a copy/paste remnant; confirm and remove or complete it.
    def __init__(self, path_to_shape_net_weights, n_classes=4):
        super(SH_UNet, self).__init__()

        self.unet = UNet((3, 544, 544))
        self.shapeUNet = ShapeUNet((4, 544, 544))
# ---- Data loaders -------------------------------------------------------
partition = 'train'
ultrasound_train = UltrasoundDataShapeNet(ROOT_DIR, partition)
train_loader = torch.utils.data.DataLoader(
    ultrasound_train, batch_size=2, shuffle=True)

partition = 'val'
ultrasound_val = UltrasoundDataShapeNet(ROOT_DIR, partition)
val_loader = torch.utils.data.DataLoader(
    ultrasound_val, batch_size=1, shuffle=False)

# ---- Model, warm-started from a pretrained checkpoint -------------------
model = ShapeUNet((4, 544, 544))
weights = torch.load(
    'weights_shape_net/Unet_manually_corrupted_lumen0.113253.pth')
model.load_state_dict(weights)
model.to(device)

# ---- Optimizer and training ---------------------------------------------
lr = 1e-4
optimizer = Adam(model.parameters(), lr=lr)

NUM_OF_EPOCHS = 50
train(model, train_loader, val_loader, optimizer, NUM_OF_EPOCHS,
      'weights_shape_net/')

# import torch
# import torch.nn as nn

# ---- Data loaders -------------------------------------------------------
partition = 'train'
ultrasound_train = UltrasoundData(ROOT_DIR, partition)
train_loader = torch.utils.data.DataLoader(
    ultrasound_train, batch_size=1, shuffle=True)

partition = 'val'
ultrasound_val = UltrasoundData(ROOT_DIR, partition)
val_loader = torch.utils.data.DataLoader(
    ultrasound_val, batch_size=1, shuffle=False)

# ---- Models -------------------------------------------------------------
# The shape network starts from a pretrained checkpoint; the U-Net does not.
PATH_TO_SHAPE_WEIGHT_MODEL = 'weights_shape_net/Unet_manually_corrupted_lumen0.113253.pth'

NUM_OF_EPOCHS = 50
unet = UNet((3, 544, 544))
shapeUNet = ShapeUNet((4, 544, 544))
shapeUNet.load_state_dict(torch.load(PATH_TO_SHAPE_WEIGHT_MODEL))

unet.to(device)
shapeUNet.to(device)

# One optimizer per network, same learning rate for both.
lr = 1e-4
optimizer1 = Adam(shapeUNet.parameters(), lr=lr)
optimizer2 = Adam(unet.parameters(), lr=lr)
train(unet, shapeUNet, train_loader, val_loader, optimizer1, optimizer2,
      NUM_OF_EPOCHS, 'weights_shape_net/')