def get_model(config):
    """Build the U-Net and load trained weights onto the available device."""
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    unet_model = m.Unet(drop_rate=0.4, bn_momentum=0.1, config=config)
    unet_model = unet_model.to(device)
    # map_location=device covers both the CPU-only and the CUDA case, so the
    # original branching on a global is_cuda flag is unnecessary.
    unet_model.load_state_dict(
        torch.load(config['trained_model'], map_location=device))
    return unet_model

def get_summary(config):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    unet_model = m.Unet(drop_rate=0.4, bn_momentum=0.1, config=config)
    unet_model = unet_model.to(device)
    # Print a layer-by-layer summary for a single-channel 200x200 input.
    summary(unet_model, (1, 200, 200))
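
A minimal usage sketch for the two helpers above (hypothetical: the config keys are inferred from the code, and summary is assumed to come from torchsummary):

# Hypothetical usage; 'trained_model' is the only config key the helpers read
# directly, the rest is whatever m.Unet expects.
config = {'trained_model': 'checkpoints/unet_best.pth'}  # illustrative path
model = get_model(config)   # weights land on GPU if available, else CPU
get_summary(config)         # prints layer shapes for a 1x200x200 input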
Code example #3
    def build_model(self):
        """Creates and initializes the shared and controller models."""
        if self.args.network_type == 'unet':
            self.shared = models.Unet(self.args)
        else:
            raise NotImplementedError(f'Network type '
                                      f'`{self.args.network_type}` is not '
                                      f'defined')
        self.controller = models.Controller(self.args)

        if self.args.num_gpu == 1:
            self.shared.cuda()
            self.controller.cuda()
        elif self.args.num_gpu > 1:
            raise NotImplementedError('`num_gpu > 1` is in progress')
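
For context, a minimal sketch of how build_model might be driven (hypothetical: the owning class, here called Trainer, is an assumed name; only the two args the method actually reads are set):

import argparse

# Hypothetical driver for build_model; `Trainer` is an assumed name for the
# class that owns it, not part of this excerpt.
args = argparse.Namespace(network_type='unet', num_gpu=0)
trainer = Trainer(args)
trainer.build_model()  # sets trainer.shared and trainer.controller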
Code example #4
print('Create model')
channels = 3
metrics = [jacc_coef]
if model == 'vgg_unet':
    model = models.VGG16_Unet(height,
                              width,
                              pretrained=True,
                              freeze_pretrained=False,
                              loss=loss,
                              optimizer=optimizer,
                              metrics=[jacc_coef])
elif model == 'unet':
    model = models.Unet(height,
                        width,
                        loss=loss,
                        optimizer=optimizer,
                        metrics=metrics,
                        fc_size=4096,
                        channels=channels)
elif model == 'unet2':
    model = models.Unet2(height,
                         width,
                         loss=loss,
                         optimizer=optimizer,
                         metrics=metrics,
                         fc_size=4096,
                         channels=channels)
else:
    print "Incorrect model name"

Code example #5
def run_main(config):
    dataset_base_path = "./data/"
    target_path = natsorted(glob(dataset_base_path + '/mask/*.png'))
    image_paths = natsorted(glob(dataset_base_path + '/img/*.png'))
    target_val_path = natsorted(glob(dataset_base_path + '/val_mask/*.png'))
    image_val_path = natsorted(glob(dataset_base_path + '/val_img/*.png'))

    nih_dataset_train = EMdataset(image_paths=image_paths,
                                  target_paths=target_path)
    nih_dataset_val = EMdataset(image_paths=image_val_path,
                                target_paths=target_val_path)

    train_loader = DataLoader(nih_dataset_train,
                              batch_size=16,
                              shuffle=True,
                              num_workers=1)
    val_loader = DataLoader(nih_dataset_val,
                            batch_size=16,
                            shuffle=True,
                            num_workers=1)
    model = m.Unet(drop_rate=0.4, bn_momentum=0.1, config=config)
    if config['operation_mode'].lower() in ("retrain", "inference"):
        print("Using a trained model...")
        model.load_state_dict(torch.load(config['trained_model']))
    elif config["operation_mode"].lower() == "visualize":
        print("Using a trained model...")
        if cuda:
            model.load_state_dict(torch.load(config['trained_model']))
        else:
            model.load_state_dict(
                torch.load(config['trained_model'], map_location='cpu'))
        v.visualize_model(model, config)
        return

    if cuda:
        model.cuda()
        print('gpu_activate')

    num_epochs = config["num_epochs"]
    initial_lr = config["lr"]
    experiment_path = config["log_output_dir"] + config['experiment_name']
    output_image_dir = experiment_path + "/figs/"

    betas = torch.linspace(3.0, 8.0, num_epochs)

    # criterion  = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=initial_lr)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs)

    writer = SummaryWriter(log_dir=utility.get_experiment_dir(config))
    best_score = 0
    for epoch in tqdm(range(1, num_epochs + 1)):
        start_time = time.time()

        lr = scheduler.get_last_lr()[0]
        model.beta = betas[epoch - 1]  # for ternary net, set beta
        writer.add_scalar('learning_rate', lr, epoch)

        model.train()
        train_loss_total = 0.0
        num_steps = 0
        capture = True
        for i, batch in enumerate(train_loader):
            input_samples, gt_samples = batch[0], batch[1]

            if cuda:
                var_input = input_samples.cuda()
                var_gt = gt_samples.cuda()
            else:
                var_input = input_samples
                var_gt = gt_samples
            var_gt = var_gt.float()  # cast before computing the loss, not after
            preds = model(var_input)
            loss = dice_loss(preds, var_gt)
            train_loss_total += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            num_steps += 1
            if capture:  # `epoch % 1 == 0` is always true; capture once per epoch
                capture = False
                input_samples, gt_samples = get_samples(
                    image_val_path, target_val_path, 4)
                if cuda:
                    input_samples = input_samples.cuda()
                with torch.no_grad():  # visualization only; skip autograd
                    preds = model(input_samples)
                input_samples = input_samples.data.cpu().numpy()
                preds = preds.data.cpu().numpy()
                save_image(input_samples[0][0], gt_samples[0][0], preds[0][0],
                           epoch, 0, output_image_dir)

        train_loss_total_avg = train_loss_total / num_steps

        # Step the scheduler once per epoch, after the optimizer updates
        # (required ordering in PyTorch >= 1.1).
        scheduler.step()

        model.eval()
        val_loss_total = 0.0
        num_steps = 0

        metric_fns = [
            dice_score, hausdorff_score, precision_score, recall_score,
            specificity_score, intersection_over_union, accuracy_score
        ]

        metric_mgr = MetricManager(metric_fns)

        for i, batch in enumerate(val_loader):
            input_samples, gt_samples = batch[0], batch[1]

            with torch.no_grad():
                if cuda:
                    var_input = input_samples.cuda()
                    var_gt = gt_samples.cuda(non_blocking=True)  # `async=` is a SyntaxError on Python 3.7+
                else:
                    var_input = input_samples
                    var_gt = gt_samples

                preds = model(var_input)
                loss = dice_loss(preds, var_gt)
                # loss = criterion(preds, var_gt)
                # loss = weighted_bce_loss(preds, var_gt, 0.5, 2.5)
                val_loss_total += loss.item()

            gt_npy = gt_samples.data.cpu().numpy()  #.astype(np.uint8)
            gt_npy = gt_npy.squeeze(axis=1)

            preds = preds.data.cpu().numpy()
            preds = threshold_predictions(preds)
            # preds = preds.astype(np.uint8)
            preds = preds.squeeze(axis=1)

            metric_mgr(preds, gt_npy)

            num_steps += 1

        metrics_dict = metric_mgr.get_results()
        metric_mgr.reset()

        writer.add_scalars('metrics', metrics_dict, epoch)

        val_loss_total_avg = val_loss_total / num_steps

        writer.add_scalars('losses', {
            'val_loss': val_loss_total_avg,
            'train_loss': train_loss_total_avg
        }, epoch)

        end_time = time.time()
        total_time = end_time - start_time
        msg = "Epoch {} took {:.2f} seconds dice_score={}. precision={} iou={} loss_train={} val_loss={}".format(
            epoch, total_time, metrics_dict["dice_score"],
            metrics_dict["precision_score"],
            metrics_dict["intersection_over_union"], train_loss_total_avg,
            val_loss_total_avg)
        utility.log_info(config, msg)
        tqdm.write(msg)

        if metrics_dict["dice_score"] > best_score:
            best_score = metrics_dict["dice_score"]
            utility.save_model(model=model, config=config)

    if config['operation_mode'].lower() != "inference":
        utility.save_model(model=model, config=config)
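
run_main relies on two helpers the excerpt does not define; plausible minimal versions (sketches assuming a standard soft-Dice loss and a fixed 0.5 cut-off, not the project's actual code) are:

import torch

# Hypothetical soft-Dice loss matching the call sites above
# (float tensors in, scalar tensor out).
def dice_loss(preds, target, smooth=1.0):
    preds = preds.contiguous().view(-1)
    target = target.contiguous().view(-1)
    intersection = (preds * target).sum()
    return 1.0 - (2.0 * intersection + smooth) / (preds.sum() + target.sum() + smooth)

# Hypothetical binarization applied before the validation metrics
# (operates on the NumPy array produced by preds.data.cpu().numpy()).
def threshold_predictions(preds, thr=0.5):
    return (preds > thr).astype(float)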
Code example #6
File: train_sgmt.py  Project: doem97/IVU-segment
            'fc2': 'categorical_crossentropy',
        },
        loss_weights={
            'conv7': 1.,
            'fc2': 0.,
        },
        metrics={
            'conv7': custom_metric,
            #'fc2': 'categorical_accuracy',
        })
    monitor_metric = 'val_conv7_jacc_coef'  # with multiple outputs, Keras prefixes the metric with the layer name
elif model_type == 'unet':
    model = models.Unet(height,
                        width,
                        custom_loss=custom_loss,
                        optimizer=optimizer,
                        custom_metrics=custom_metric,
                        fc_size=fc_size,
                        channels=channels)
    monitor_metric = 'val_jacc_coef'
elif model_type == 'unet2':
    model = models.Unet2(height,
                         width,
                         custom_loss=custom_loss,
                         optimizer=optimizer,
                         custom_metrics=custom_metric,
                         fc_size=fc_size,
                         channels=channels)
    monitor_metric = 'val_jacc_coef'
elif model_type == 'vgg':
    VGG16_WEIGHTS_NOTOP = project_path + 'pretrained_weights/vgg16_notop.h5'
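
The excerpt above opens mid-way through a Keras compile call for a two-output model; the overall shape of such a call is roughly as follows (a hypothetical reconstruction, with the layer names conv7 and fc2 taken from the excerpt):

model.compile(optimizer=optimizer,
              loss={
                  'conv7': custom_loss,
                  'fc2': 'categorical_crossentropy',
              },
              loss_weights={
                  'conv7': 1.,
                  'fc2': 0.,
              },
              metrics={
                  'conv7': custom_metric,
              })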
Code example #7
                        help='output model file')
    parser.add_argument("--batch-size",
                        type=int,
                        default=16,
                        help='batch size')
    args = parser.parse_args()

    # directory = Path(args.input)
    # files = [f for f in directory.iterdir() if f.suffix == ".pth"]
    files = glob.glob(args.input + "/stage1/checkpoints/stage1.*.pth")
    files += glob.glob(args.input + "/stage2/checkpoints/stage1.*.pth")
    assert len(files) > 1

    net = models.Unet(
        encoder_name="resnet34",
        activation='sigmoid',
        classes=1,
        # center=True
    )
    checkpoint = torch.load(files[0])
    net.load_state_dict(checkpoint['model_state_dict'])

    for i, f in enumerate(files[1:]):
        # net2 = model.load(f)
        net2 = models.Unet(
            encoder_name="resnet34",
            activation='sigmoid',
            classes=1,
            # center=True
        )
        checkpoint = torch.load(f)
        net2.load_state_dict(checkpoint['model_state_dict'])
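
The excerpt is cut off here; the load-one-then-loop pattern suggests checkpoint weight averaging. A hypothetical continuation of the loop body (not the project's code) would be:

        # Hypothetical: fold net2's weights into a running average held by
        # net. After step i, net holds the mean of i + 2 checkpoints.
        with torch.no_grad():
            for p, p2 in zip(net.parameters(), net2.parameters()):
                p.mul_(i + 1).add_(p2).div_(i + 2)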
Code example #8

def get_model(config):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    unet_model = m.Unet(drop_rate=0.4, bn_momentum=0.1, config=config)
    unet_model = unet_model.to(device)
    unet_model.load_state_dict(torch.load(config['trained_model']))
    return unet_model
Code example #9
initial_lr = 0.001
num_epochs = 550
opt_loss = 10000


def threshold(array):
    array = (array > 0.89) * 1.0
    return array
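
A quick illustrative check of threshold (input values chosen arbitrarily; note the strict > comparison, so 0.89 itself maps to 0):

import numpy as np

print(threshold(np.array([0.2, 0.89, 0.95])))  # -> [0. 0. 1.]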


# print(torch.cuda.is_available())
# print(torch.cuda.current_device())
dirr = '/home/eljurros/spare-workplace/Multi_Organ_Segmentation/DataSet'

if model_type == 'Unet':
    model = models.Unet(drop_rate=0.4, bn_momentum=0.1)
    # Note: this torch.load replaces the freshly built model with a fully
    # pickled model object (not a state_dict).
    model = torch.load(
        '/home/eljurros/Desktop/myria_scripts/checkpnt_BEST.ckpt')

elif model_type == 'BB_Unet':
    model = models.BB_Unet(drop_rate=0.4, bn_momentum=0.1)

optimizer = optim.Adam(model.parameters(), lr=initial_lr)
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs)
train_transform = None
txt_file_path = '../Multi_Organ_Segmentation/results_MO_newest_edition_2.txt'

dice = organ_0 = organ_1 = organ_2 = organ_3 = 0
start_time = time.time()

dataset = DataSEt_Classes.SegTHor_2D_TrainDS(dirr,
Code example #10
def run_main(config):
    train_transform = transforms.Compose([
        CenterCrop2D((200, 200)),
        ElasticTransform(alpha_range=(28.0, 30.0),
                         sigma_range=(3.5, 4.0),
                         p=0.3),
        RandomAffine(degrees=4.6, scale=(0.98, 1.02), translate=(0.03, 0.03)),
        RandomTensorChannelShift((-0.10, 0.10)),
        ToTensor(),
        NormalizeInstance(),
    ])

    val_transform = transforms.Compose([
        CenterCrop2D((200, 200)),
        ToTensor(),
        NormalizeInstance(),
    ])
    dataset_base_path = "/export/tmp/hemmat/datasets/em_challenge/"
    target_path = natsort.natsorted(glob.glob(dataset_base_path +
                                              'mask/*.PNG'))
    image_paths = natsort.natsorted(glob.glob(dataset_base_path +
                                              'data/*.PNG'))
    target_val_path = natsort.natsorted(
        glob.glob(dataset_base_path + 'val_mask/*.PNG'))
    image_val_path = natsort.natsorted(
        glob.glob(dataset_base_path + 'val_img/*.PNG'))

    gmdataset_train = EMdataset(image_paths=image_paths,
                                target_paths=target_path)
    gmdataset_val = EMdataset(image_paths=image_val_path,
                              target_paths=target_val_path)
    train_loader = DataLoader(gmdataset_train,
                              batch_size=5,
                              shuffle=True,
                              num_workers=1)
    val_loader = DataLoader(gmdataset_val,
                            batch_size=4,
                            shuffle=True,
                            num_workers=1)

    utility.create_log_file(config)
    utility.log_info(
        config, "{0}\nStarting experiment {1}\n{0}\n".format(
            50 * "=", utility.get_experiment_name(config)))
    model = m.Unet(drop_rate=0.4, bn_momentum=0.1, config=config)
    if config['operation_mode'].lower() in ("retrain", "inference"):
        print("Using a trained model...")
        model.load_state_dict(torch.load(config['trained_model']))
    elif config["operation_mode"].lower() == "visualize":
        print("Using a trained model...")
        if cuda:
            model.load_state_dict(torch.load(config['trained_model']))
        else:
            model.load_state_dict(
                torch.load(config['trained_model'], map_location='cpu'))
        mv.visualize_model(model, config)
        return

    if cuda:
        model.cuda()

    num_epochs = config["num_epochs"]
    initial_lr = config["lr"]
    experiment_path = config["log_output_dir"] + config['experiment_name']
    output_image_dir = experiment_path + "/figs/"

    betas = torch.linspace(3.0, 8.0, num_epochs)
    optimizer = optim.Adam(model.parameters(), lr=initial_lr)
    # scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs)
    # e.g. num_epochs = 550 -> milestones [110, 220, 330, 440]
    lr_milestones = list(range(0, int(num_epochs), int(num_epochs) // 5))[1:]
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=lr_milestones,
                                               gamma=0.1)

    writer = SummaryWriter(log_dir=utility.get_experiment_dir(config))
    best_dice = 0
    for epoch in tqdm(range(1, num_epochs + 1)):
        start_time = time.time()

        lr = scheduler.get_last_lr()[0]
        model.beta = betas[epoch - 1]  # for ternary net, set beta
        writer.add_scalar('learning_rate', lr, epoch)

        model.train()
        train_loss_total = 0.0
        num_steps = 0
        capture = True
        for i, batch in enumerate(train_loader):
            input_samples, gt_samples, idx = batch[0], batch[1], batch[2]

            if cuda:
                var_input = input_samples.cuda()
                var_gt = gt_samples.cuda(non_blocking=True)  # `async=` is a SyntaxError on Python 3.7+
                var_gt = var_gt.float()
            else:
                var_input = input_samples
                var_gt = gt_samples
                var_gt = var_gt.float()
            preds = model(var_input)

            loss = calc_loss(preds, var_gt)
            train_loss_total += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            num_steps += 1
            if epoch % 5 == 0 and capture:
                capture = False
                input_samples, gt_samples = get_samples(
                    image_val_path, target_val_path, 4)
                if cuda:
                    input_samples = input_samples.cuda()
                with torch.no_grad():  # visualization only; skip autograd
                    preds = model(input_samples)
                input_samples = input_samples.data.cpu().numpy()
                preds = preds.data.cpu().numpy()
                save_image(input_samples[0][0], gt_samples[0][0], preds[0][0],
                           epoch, 0, output_image_dir)

        train_loss_total_avg = train_loss_total / num_steps

        # Step the scheduler once per epoch, after the optimizer updates
        # (required ordering in PyTorch >= 1.1).
        scheduler.step()

        model.eval()  # switch to evaluation mode for the validation pass
        val_loss_total = 0.0
        num_steps = 0

        metric_fns = [
            dice_score, hausdorff_score, precision_score, recall_score,
            specificity_score, intersection_over_union, accuracy_score,
            rand_index_score
        ]

        metric_mgr = MetricManager(metric_fns)

        for i, batch in enumerate(val_loader):
            input_samples, gt_samples, idx = batch[0], batch[1], batch[2]
            with torch.no_grad():
                if cuda:
                    var_input = input_samples.cuda()
                    var_gt = gt_samples.cuda(non_blocking=True)  # `async=` is a SyntaxError on Python 3.7+
                    var_gt = var_gt.float()
                else:
                    var_input = input_samples
                    var_gt = gt_samples
                    var_gt = var_gt.float()
                preds = model(var_input)
                loss = dice_loss(preds, var_gt)
                val_loss_total += loss.item()
            # Metrics computation
            gt_npy = gt_samples.numpy().astype(np.uint8)
            gt_npy = gt_npy.squeeze(axis=1)

            preds = preds.data.cpu().numpy()
            preds = threshold_predictions(preds)
            preds = preds.astype(np.uint8)
            preds = preds.squeeze(axis=1)
            metric_mgr(preds, gt_npy)
            #save_image(input_samples[0][0], preds[0], gt_samples, epoch, idx[0])
            # save_pred(model, image_val_path, epoch, output_image_dir)
            num_steps += 1

        metrics_dict = metric_mgr.get_results()
        metric_mgr.reset()

        writer.add_scalars('metrics', metrics_dict, epoch)

        val_loss_total_avg = val_loss_total / num_steps

        writer.add_scalars('losses', {
            'val_loss': val_loss_total_avg,
            'train_loss': train_loss_total_avg
        }, epoch)

        end_time = time.time()
        total_time = end_time - start_time
        log_str = "Epoch {} took {:.2f} seconds train_loss={}   dice_score={}   rand_index_score={}  lr={}.".format(
            epoch, total_time, train_loss_total_avg,
            metrics_dict["dice_score"], metrics_dict["rand_index_score"],
            get_lr(optimizer))
        utility.log_info(config, log_str)
        tqdm.write(log_str)

        if metrics_dict["dice_score"] > best_dice:
            best_dice = metrics_dict["dice_score"]
            utility.save_model(model=model, config=config)
    if config['operation_mode'].lower() != "inference":
        utility.save_model(model=model, config=config)
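
Both run_main variants also depend on a MetricManager with __call__, get_results, and reset methods; a minimal sketch consistent with those call sites (hypothetical; the original class is not shown) is:

# Hypothetical MetricManager: averages each metric function over the batches
# seen since the last reset().
class MetricManager:
    def __init__(self, metric_fns):
        self.metric_fns = metric_fns
        self.reset()

    def __call__(self, preds, gt):
        for fn in self.metric_fns:
            self.results[fn.__name__] += fn(preds, gt)
        self.num_batches += 1

    def get_results(self):
        return {name: total / max(self.num_batches, 1)
                for name, total in self.results.items()}

    def reset(self):
        self.results = {fn.__name__: 0.0 for fn in self.metric_fns}
        self.num_batches = 0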