Example #1
def set_model(self, params):
    '''Instantiate the model selected by params['model_type'].'''
    if params['model_type'].lower() == 'full':
        from Models.Full_Connected import Full_Connected
        self.__model = Full_Connected(params=params)
    elif params['model_type'].lower() == 'convolutional':
        from Models.Convolutional import Convolutional
        self.__model = Convolutional(params=params)
    elif params['model_type'].lower() == 'vgg16':
        from Models.VGG16 import VGG16
        self.__model = VGG16(params=params)
    elif params['model_type'].lower() == 'mobile':
        from Models.Mobile_Net import Mobile_Net
        self.__model = Mobile_Net(params=params)
    elif params['model_type'].lower() == 'mobilev2':
        from Models.Mobile_NetV2 import Mobile_NetV2
        self.__model = Mobile_NetV2(params=params)
    elif params['model_type'].lower() == 'resnet':
        from Models.ResNet import ResNet
        self.__model = ResNet(params=params)
    elif params['model_type'].lower() == 'unet':
        from Models.UNet import UNet
        self.__model = UNet(params=params)
    else:
        from Models.Custom_Network import Custom_Network
        self.__model = Custom_Network(params=params)
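Since every branch differs only in the class it instantiates, the chain can be collapsed into a lookup table. A minimal sketch, assuming the same Models package layout (the per-branch lazy imports above become call-time imports here):

def set_model(self, params):
    '''Instantiate the model selected by params['model_type'].'''
    from Models.Full_Connected import Full_Connected
    from Models.Convolutional import Convolutional
    from Models.VGG16 import VGG16
    from Models.Mobile_Net import Mobile_Net
    from Models.Mobile_NetV2 import Mobile_NetV2
    from Models.ResNet import ResNet
    from Models.UNet import UNet
    from Models.Custom_Network import Custom_Network

    registry = {
        'full': Full_Connected,
        'convolutional': Convolutional,
        'vgg16': VGG16,
        'mobile': Mobile_Net,
        'mobilev2': Mobile_NetV2,
        'resnet': ResNet,
        'unet': UNet,
    }
    # unknown types fall back to Custom_Network, matching the else branch above
    cls = registry.get(params['model_type'].lower(), Custom_Network)
    self.__model = cls(params=params)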
Example #2
def evaluate_generator(model_path):
    '''
        Evaluate segmentation performance on CT slices using a generator.

        :param model_path: path to the trained model weights
    '''
    model = UNet(input_shape=(512, 512, 1))
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=focal_tversky_loss,
                  metrics=[dice_coef])
    model.load_weights(model_path)

    image_path = 'generate_data/test.h5'  # h5 file of evaluated patients
    TOTAL_IMAGE = 1814  # total images in h5 file
    BATCH_SIZE = 2

    reader = HDF5DatasetGenerator(db_path=image_path, batch_size=BATCH_SIZE)
    generator = reader.generator()

    results = model.evaluate_generator(generator,
                                       steps=TOTAL_IMAGE // BATCH_SIZE,
                                       verbose=1)
    for i, metric in enumerate(model.metrics_names):
        print(metric, ':', results[i])

    reader.close()
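dice_coef is project-local and not shown in this snippet; a common Keras-backend formulation (an assumption; the repository's version may differ) is:

from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Dice = 2|A n B| / (|A| + |B|); `smooth` avoids division by zero
    # on empty masks
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (
        K.sum(y_true_f) + K.sum(y_pred_f) + smooth)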
Example #3
def train():

    train_reader = HDF5DatasetGenerator(db_path=train_path,
                                        batch_size=BATCH_SIZE)
    train_iter = train_reader.generator()

    val_reader = HDF5DatasetGenerator(db_path=val_path, batch_size=BATCH_SIZE)
    val_iter = val_reader.generator()

    model = UNet(input_shape=(512, 512, 1))
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=focal_tversky_loss,
                  metrics=[dice_coef])

    # create save_path and the nested model/logs dirs in one call,
    # tolerating a partially existing tree
    os.makedirs(save_path + '/model/logs', exist_ok=True)

    model_checkpoint = ModelCheckpoint(
        save_path + '/model/weights.{epoch:02d}-{val_loss:.2f}.hdf5',
        monitor='val_loss',
        verbose=1,
        save_best_only=True)
    tensorboard = TensorBoard(log_dir=save_path + '/model/logs')
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=4,
                                  mode='auto')
    callbacks = [model_checkpoint, tensorboard, reduce_lr]

    model.fit_generator(train_iter,
                        steps_per_epoch=TOTAL_TRAIN // BATCH_SIZE,
                        epochs=20,
                        validation_data=val_iter,
                        validation_steps=TOTAL_VAL // BATCH_SIZE,
                        verbose=1,
                        callbacks=callbacks)

    train_reader.close()
    val_reader.close()

    model.save(save_path + '/model/model.h5')
    print('Finished training ......')
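focal_tversky_loss is likewise defined elsewhere in the project; a sketch of the standard formulation from Abraham & Khan (2019), with alpha, beta, gamma, and smooth as assumed defaults:

from keras import backend as K

def focal_tversky_loss(y_true, y_pred,
                       alpha=0.7, beta=0.3, gamma=0.75, smooth=1.0):
    # the Tversky index generalizes Dice: alpha weighs false negatives,
    # beta false positives; the focal exponent gamma shifts training
    # focus toward hard examples
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    tp = K.sum(y_true_f * y_pred_f)
    fn = K.sum(y_true_f * (1.0 - y_pred_f))
    fp = K.sum((1.0 - y_true_f) * y_pred_f)
    tversky = (tp + smooth) / (tp + alpha * fn + beta * fp + smooth)
    return K.pow(1.0 - tversky, gamma)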
Example #4
def predict(model_path, idx):
    '''
        Predict segmentation masks for all slices of a single CT volume.

        :param model_path: path to the trained model weights
        :param idx: index of the patient to process
    '''
    pred_dir = 'pred/'
    if not os.path.exists(pred_dir):
        os.mkdir(pred_dir)

    model = UNet((512, 512, 1))
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=focal_tversky_loss,
                  metrics=[dice_coef])
    model.load_weights(model_path)

    # reading original images
    reader = sitk.ImageSeriesReader()
    dicom_names = reader.GetGDCMSeriesFileNames(data_path + 'volume/' +
                                                str(idx) + '/')
    reader.SetFileNames(dicom_names)
    images = reader.Execute()
    images = sitk.GetArrayFromImage(images)  # images.shape: (slices, 512, 512)

    # HU values are restricted to lung window settings [-1100, 300]
    # namely: window width: 1400, window level: -400
    images[images < -1100] = -1100
    images[images > 300] = 300

    # scale by the window range; note no offset is applied, so values
    # span roughly [-0.79, 0.21] rather than [0, 1]
    max_value, min_value = images.max(), images.min()
    images = images / (max_value - min_value)

    pred_inputs = np.expand_dims(
        images, axis=-1)  # pred_inputs.shape: (slices, 512, 512, 1)
    pred_masks = model.predict(
        pred_inputs, batch_size=1,
        verbose=1)  # pred_masks.shape: (slices, 512, 512, 1)

    # values in pred_masks are probabilities in range [0, 1]
    # converting to binary mask using a threshold of 0.5
    pred_masks[pred_masks >= 0.5] = 1
    pred_masks[pred_masks < 0.5] = 0
    pred_masks = pred_masks.astype(int)

    out = sitk.GetImageFromArray(pred_masks)
    sitk.WriteImage(out, pred_dir + 'pred_threshold_' + str(idx) + '.nii.gz')
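One caveat: sitk.GetImageFromArray returns an image with unit spacing and zero origin, so the mask written above loses the geometry of the source series. A hedged fix, assuming the sitk.Image returned by reader.Execute() is kept in a separate variable (images_sitk below, an assumed extra name) before GetArrayFromImage:

# copy spacing/origin/direction from the source series so the mask
# overlays correctly on the original volume in viewers
out = sitk.GetImageFromArray(pred_masks.squeeze(axis=-1).astype(np.uint8))
out.CopyInformation(images_sitk)
sitk.WriteImage(out, pred_dir + 'pred_threshold_' + str(idx) + '.nii.gz')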
Example #5
def evaluate(model_path, idx):
    '''
        Evaluate performance on all slices of a single CT volume.

        :param model_path: path to the trained model weights
        :param idx: index of the patient to evaluate
    '''
    model = UNet(input_shape=(512, 512, 1))
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=focal_tversky_loss,
                  metrics=[dice_coef])
    model.load_weights(model_path)

    # reading original images
    reader = sitk.ImageSeriesReader()
    dicom_names = reader.GetGDCMSeriesFileNames(data_path + 'volume/' +
                                                str(idx) + '/')
    reader.SetFileNames(dicom_names)
    images = reader.Execute()
    images = sitk.GetArrayFromImage(images)  # images.shape: (slices, 512, 512)

    # HU values are restricted to lung window settings [-1100, 300]
    # namely: window width: 1400, window level: -400
    images[images < -1100] = -1100
    images[images > 300] = 300

    # scale by the window range; note no offset is applied, so values
    # span roughly [-0.79, 0.21] rather than [0, 1]
    max_value, min_value = images.max(), images.min()
    images = images / (max_value - min_value)

    # reading ground truth labels
    labels = sitk.ReadImage(data_path + 'segmentation/' + str(idx) + '.nii')
    labels = sitk.GetArrayFromImage(labels)  # labels.shape: (slices, 512, 512)

    # (slices, 512, 512) => (slices, 512, 512, 1)
    images = np.expand_dims(images, axis=-1)
    labels = np.expand_dims(labels, axis=-1)

    results = model.evaluate(images, labels, batch_size=1)
    for i, metric in enumerate(model.metrics_names):
        print(metric, ':', results[i])
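The windowing shared by predict and evaluate can be written more compactly with np.clip; an equivalent sketch:

# lung window [-1100, 300], then scale by the window range as above
images = np.clip(images, -1100, 300)
images = images / (images.max() - images.min())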
Example #6
    # the *_loader names hold Dataset objects here; wrap them in DataLoaders
    train_loader = DataLoader(train_loader, batch_size=args.batch,
                              shuffle=True, num_workers=4)
    test_loader = DataLoader(test_loader, batch_size=args.batch,
                             shuffle=False, num_workers=4)
    classes = getClasses(args.classes)
    weights = None
    loaded = False
    if os.path.isfile(args.weights):
        # class weights from frequencies: weight_c = 1 - freq_c, so
        # frequent classes contribute less to the loss
        weights = np.load(args.weights)
        weights = weights / np.sum(weights)
        weights = 1 - weights
        print(weights)
    if os.path.isfile(args.model):
        model = torch.load(args.model).to(args.device)
        print("Model loaded!")
        loaded = True
    else:
        model = UNet(4, len(classes), args.depth, args.filters).to(args.device)
        model.initialize()
        print("Model initialized!")
    if weights is None:
        criterion = torch.nn.CrossEntropyLoss().to(args.device)
    else:
        weights = torch.FloatTensor(weights).to(args.device)
        criterion = torch.nn.CrossEntropyLoss(weight=weights).to(args.device)
    optimizer = torch.optim.Adam(model.parameters(), args.lr)

    train_matrix = np.zeros((len(classes), len(classes)), dtype=np.int32)

    def onTrainBatch(batch_id, features, labels, output, loss):
        nonlocal train_matrix
        output = torch.argmax(output, dim=1)
        mat = confusion_matrix(labels, output, len(classes))
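confusion_matrix here takes an explicit class count, so it is a project helper rather than scikit-learn's; a plausible PyTorch sketch:

import torch

def confusion_matrix(labels, preds, num_classes):
    # rows index the true class, columns the predicted class;
    # bincount over true * C + pred fills the CxC matrix in one pass
    idx = labels.flatten() * num_classes + preds.flatten()
    return torch.bincount(idx, minlength=num_classes ** 2).reshape(
        num_classes, num_classes)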
Example #7
def get_experiment_objs(exp_dir, create_model=True):
    settings = load_pickle_obj(exp_dir / 'settings.pkl')
    params = dict(num_categories=settings['num_categories'],
                  pretrained_on=settings['pretrained_on'],
                  num_epochs=settings['num_epochs'],
                  device=settings['device'])
    model_name = settings['model_name']
    model = None
    if create_model:
        if settings['continue_training']:
            assert settings['initialize_weights'] is False and settings[
                'continue_model_path'] is not None
            model = torch.load(settings['continue_model_path'])
        else:
            # base model:
            if model_name == 'unet':
                model = UNet(in_channels=settings['in_channels'],
                             num_categories=settings['num_categories'],
                             filter_sizes=settings['filter_sizes'],
                             deep_supervision=settings['deep_supervision'])
            # attention models:
            elif model_name == 'attention_unet':
                model = AttentionUNet(
                    in_channels=settings['in_channels'],
                    num_categories=settings['num_categories'],
                    filter_sizes=settings['filter_sizes'],
                    deep_supervision=settings['deep_supervision'])
            elif model_name == 'cbam_unet':
                model = CBAM_UNet(
                    in_channels=settings['in_channels'],
                    num_categories=settings['num_categories'],
                    filter_sizes=settings['filter_sizes'],
                    deep_supervision=settings['deep_supervision'])
            elif model_name == 'dualattention_unet':
                model = DualAttentionUNet(
                    in_channels=settings['in_channels'],
                    num_categories=settings['num_categories'],
                    filter_sizes=settings['filter_sizes'],
                    deep_supervision=settings['deep_supervision'])
            elif model_name == 'residualattention_unet':
                model = ResidualAttentionUNet(
                    in_channels=settings['in_channels'],
                    num_categories=settings['num_categories'],
                    filter_sizes=settings['filter_sizes'],
                    deep_supervision=settings['deep_supervision'])
            elif model_name == 'scag_unet':
                model = scAG_UNet(
                    in_channels=settings['in_channels'],
                    num_categories=settings['num_categories'],
                    filter_sizes=settings['filter_sizes'],
                    deep_supervision=settings['deep_supervision'])
            # transfer learning models:
            elif model_name == 'densenet121_unet':
                model = DenseNet121_UNet(
                    in_channels=settings['in_channels'],
                    num_categories=settings['num_categories'],
                    deep_supervision=settings['deep_supervision'],
                    pretrained=True)
            elif model_name == 'mobilenetv2_unet':
                model = MobileNetV2_UNet(
                    in_channels=settings['in_channels'],
                    num_categories=settings['num_categories'],
                    deep_supervision=settings['deep_supervision'],
                    pretrained=True)
            elif model_name == 'resnet34_unet':
                model = ResNet34_UNet(
                    in_channels=settings['in_channels'],
                    num_categories=settings['num_categories'],
                    deep_supervision=settings['deep_supervision'],
                    pretrained=True)
            elif model_name == 'vgg11_unet':
                model = vgg11_UNet(
                    in_channels=settings['in_channels'],
                    num_categories=settings['num_categories'],
                    deep_supervision=settings['deep_supervision'],
                    pretrained=True)
            elif model_name == 'vgg16_unet':
                model = vgg16_UNet(
                    in_channels=settings['in_channels'],
                    num_categories=settings['num_categories'],
                    deep_supervision=settings['deep_supervision'],
                    pretrained=True)
            assert model is not None
    dataloader_path = settings['dataloader_path']
    lr_settings = settings['learning_rate_dict']
    learning_rate_dict = dict(
        static_lr=lr_settings['static_lr'],
        use_cyclic_learning_rate=lr_settings['use_cyclic_learning_rate'],
        base_lr=lr_settings['base_lr'],
        max_lr=lr_settings['max_lr'])
    return params, model, dataloader_path, learning_rate_dict, settings
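For reference, a settings.pkl containing the keys this function reads could be produced like this (a sketch; every value is illustrative):

import pickle
import torch

settings = dict(
    num_categories=2,
    pretrained_on=None,
    num_epochs=100,
    device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
    model_name='unet',
    continue_training=False,
    initialize_weights=True,
    continue_model_path=None,
    in_channels=3,
    filter_sizes=(32, 64, 128, 256, 512),
    deep_supervision=True,
    dataloader_path='tmp/Dataloader_SN1_Buildings.pkl',
    freeze_encoder=False,  # read by the training script in Example #10
    learning_rate_dict=dict(static_lr=1e-4,
                            use_cyclic_learning_rate=False,
                            base_lr=1e-5,
                            max_lr=1e-3))
with open('settings.pkl', 'wb') as f:
    pickle.dump(settings, f)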
Example #8
if command == '--Help':
    print("Check the documentation.")

elif command == "--Train" or command == "--Predict":
    try:
        architecture = sys.argv[2]
    except IndexError:
        print(
            "An IndexError has occurred! Check the documentation to make sure "
            "you're passing all the required arguments."
        )
        sys.exit(1)

    if command == "--Train":
        if architecture == '--UNet':
            unet = UNet()  # avoid shadowing the UNet class
            unet.train()
        elif architecture == '--UNet++':
            unetpp = UNetPP()
            unetpp.train()
        else:
            raise Exception(
                "You have passed an invalid argument.\nCheck the documentation for the allowed arguments."
            )

    elif command == "--Predict":
        if architecture == '--UNet':
            unet = UNet()
            unet.predict()
        elif architecture == '--UNet++':
            unetpp = UNetPP()
            unetpp.predict()
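The --Help branch above defers to the documentation; argparse would generate usage text automatically. A hedged sketch of the same dispatch (UNet and UNetPP are the project classes used above):

import argparse

parser = argparse.ArgumentParser(description='Train or predict with UNet / UNet++.')
parser.add_argument('mode', choices=['train', 'predict'])
parser.add_argument('architecture', choices=['unet', 'unet++'])
args = parser.parse_args()

net = UNet() if args.architecture == 'unet' else UNetPP()
getattr(net, args.mode)()  # net.train() or net.predict()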
Example #9
    classes = getClasses(args.classes)
    weights = None
    loaded = False
    if os.path.isfile(args.weights):
        weights = np.load(args.weights)
        weights = weights / np.sum(weights)
        weights = 1 - weights
        print(weights)
    if os.path.isfile(args.model):
        model = torch.load(args.model).to(args.device)
        print("Model loaded!")
        loaded = True
    else:
        model = UNet(4,
                     len(classes),
                     args.depth,
                     args.filters,
                     convPerLayer=args.convperlayer,
                     dropout=args.dropout).to(args.device)
        model.initialize()
        print("Model initialized!")
    if weights is None:
        criterion = torch.nn.CrossEntropyLoss().to(args.device)
    else:
        weights = torch.FloatTensor(weights).to(args.device)
        criterion = torch.nn.CrossEntropyLoss(weight=weights).to(args.device)
    optimizer = torch.optim.Adam(model.parameters(), args.lr)

    def print_matrix(matrix, classes):
        p = precision(matrix)
        r = recall(matrix)
        f = f1(p, r)
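precision, recall, and f1 are computed from the confusion matrix; standard per-class definitions as a sketch (assuming rows index true classes, as in the helper sketched under Example #6):

import numpy as np

def precision(matrix):
    # true positives on the diagonal; column sums = predictions per class
    return np.diag(matrix) / np.maximum(matrix.sum(axis=0), 1)

def recall(matrix):
    # row sums = ground-truth instances per class
    return np.diag(matrix) / np.maximum(matrix.sum(axis=1), 1)

def f1(p, r):
    # harmonic mean of per-class precision and recall
    return 2 * p * r / np.maximum(p + r, 1e-12)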
Example #10
import sys
from pathlib import Path

import matplotlib.pyplot as plt
import torch

# UNet and get_experiment_objs are project-local imports (not shown here)

# Params:
min_lr = 1e-9
max_lr = 1.0
window_size_smoothing = 100
num_epochs = 3
momentum = .95
params = dict(
    num_categories=2,
    pretrained_on='Imagenet',
    num_epochs=100,
    device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
dataloader_path = Path('tmp/Dataloader_SN1_Buildings.pkl')
model = UNet(in_channels=3,
             num_categories=params['num_categories'],
             filter_sizes=(32, 64, 128, 256, 512),
             deep_supervision=True)
running_experiment = False
initialize_weights = True
freeze_encoder = False
continue_training = False
if len(sys.argv) > 1:
    exp_dir = Path(sys.argv[1])
    running_experiment = True
    params, model, dataloader_path, learning_rate_dict, experiment_settings = get_experiment_objs(
        exp_dir)
    initialize_weights = experiment_settings['initialize_weights']
    freeze_encoder = experiment_settings['freeze_encoder']
    continue_training = experiment_settings['continue_training']
model.to(params['device'])
if initialize_weights: