Example #1
def train(args,
          train_loader,
          model,
          criterion,
          optimizer,
          epoch,
          scheduler=None):
    losses = AverageMeter()
    ious = AverageMeter()
    dices_1s = AverageMeter()
    dices_2s = AverageMeter()
    model.train()

    for i, (input, target) in tqdm(enumerate(train_loader),
                                   total=len(train_loader)):

        input = input.cuda()
        target = target.cuda()

        # compute output
        if args.deepsupervision:
            outputs = model(input)
            loss = 0
            for output in outputs:
                loss += criterion(output, target)
            loss /= len(outputs)
            iou = iou_score(outputs[-1], target)
            dice_1, dice_2 = dice_coef(outputs[-1], target)[:2]
        else:
            output = model(input)
            loss = criterion(output, target)
            iou = iou_score(output, target)
            dice_1, dice_2 = dice_coef(output, target)[:2]

        losses.update(loss.item(), input.size(0))
        ious.update(iou, input.size(0))
        dices_1s.update(float(dice_1), input.size(0))
        dices_2s.update(float(dice_2), input.size(0))

        # compute gradient and do optimizing step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    log = OrderedDict([('loss', losses.avg), ('iou', ious.avg),
                       ('dice_1', dices_1s.avg), ('dice_2', dices_2s.avg)])

    return log
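
The snippets in this collection rely on helpers such as AverageMeter and dice_coef that are defined elsewhere in their repositories. A minimal sketch of what these helpers typically look like in PyTorch training scripts of this kind (the exact signatures and smoothing terms in the original projects may differ) is:

import torch


class AverageMeter:
    """Keeps a running, batch-size-weighted average of a scalar metric."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, value, n=1):
        self.sum += float(value) * n
        self.count += n
        self.avg = self.sum / self.count


def dice_coef(output, target, smooth=1e-5):
    """Per-class soft Dice for (N, C, ...) logits against binary targets of the same shape."""
    probs = torch.sigmoid(output)
    dims = (0,) + tuple(range(2, target.dim()))  # reduce over batch and spatial dims
    intersection = (probs * target).sum(dims)
    union = probs.sum(dims) + target.sum(dims)
    return (2.0 * intersection + smooth) / (union + smooth)  # shape (C,), index as [0], [1], ...
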
Example #2
def train_single_epoch(config, model, dataloader, criterion, optimizer, log_train, epoch):
    batch_time = AverageMeter()
    losses = AverageMeter()
    scores = AverageMeter()
    score_1 = AverageMeter()
    score_2 = AverageMeter()
    score_3 = AverageMeter()
    score_4 = AverageMeter()

    model.train()

    end = time.time()
    for i, (images, labels) in enumerate(dataloader):
        optimizer.zero_grad()

        images = images.to(device)
        labels = labels.to(device)

        # the model returns a dict of heads (torchvision-style segmentation model); use the main 'out' output
        logits = model(images)['out']

        if config.LABEL_SMOOTHING:
            smoother = LabelSmoother()
            loss = criterion(logits, smoother(labels))
        else:
            loss = criterion(logits, labels)

        losses.update(loss.item(), images.shape[0])

        loss.backward()
        optimizer.step()

        preds = torch.sigmoid(logits)

        score = dice_coef(preds, labels)
        score_1.update(score[0].item(), images.shape[0])
        score_2.update(score[1].item(), images.shape[0])
        score_3.update(score[2].item(), images.shape[0])
        score_4.update(score[3].item(), images.shape[0])
        scores.update(score.mean().item(), images.shape[0])

        batch_time.update(time.time() - end)
        end = time.time()

        if i % config.PRINT_EVERY == 0:
            print("[%d/%d][%d/%d] time: %.2f, loss: %.6f, score: %.4f [%.4f, %.4f, %.4f, %.4f], lr: %.6f"
                  % (epoch, config.TRAIN.NUM_EPOCHS, i, len(dataloader), batch_time.sum, loss.item(), score.mean().item(),
                     score[0].item(), score[1].item(), score[2].item(), score[3].item(),
                     optimizer.param_groups[0]['lr']))

        del images, labels, logits, preds
        torch.cuda.empty_cache()

    log_train.write('[%d/%d] loss: %.6f, score: %.4f, dice: [%.4f, %.4f, %.4f, %.4f], lr: %.6f\n'
                    % (epoch, config.TRAIN.NUM_EPOCHS, losses.avg, scores.avg, score_1.avg, score_2.avg, score_3.avg, score_4.avg,
                       optimizer.param_groups[0]['lr']))
    print('average loss over TRAIN epoch: %f' % losses.avg)
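
LabelSmoother is not shown in this snippet. A minimal sketch of one common form of label smoothing for soft segmentation targets, assuming hard 0/1 masks and a smoothing factor eps (the class in the source project may differ), is:

class LabelSmoother:
    """Moves hard 0/1 mask values slightly toward 0.5 to soften the targets."""

    def __init__(self, eps=0.1):
        self.eps = eps

    def __call__(self, labels):
        # 1 -> 1 - eps/2, 0 -> eps/2
        return labels * (1.0 - self.eps) + 0.5 * self.eps
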
Example #3
    def run(self):

        self.logger.info('Testing started.')
        test_loader = self.dataset.get_loader(training=False)

        step = self.test_steps
        self.load_model(step)
        self.logger.info(f'[Step {step:d}] Model Loaded.')

        for test_idx, (image_tensor, label_tensor,
                       affine_tensor) in enumerate(test_loader):

            input_tensor = (image_tensor - tf.reduce_mean(image_tensor)
                            ) / tf.math.reduce_std(image_tensor)
            if self.hybrid:
                logit_tensor = self.chop_forward(input_tensor)
            else:
                inputs_list = decompose_vol2cube(tf.squeeze(input_tensor),
                                                 self.batch_size, 64, 1, 4)
                logits_list = [
                    self.model(x, training=False) for x in inputs_list
                ]
                logit_tensor = compose_prob_cube2vol(logits_list,
                                                     image_tensor.shape[1:4],
                                                     self.batch_size, 64, 4, 2)

            pred_tensor = tf.nn.softmax(logit_tensor)[..., 0:1]
            self.pr_meter.update_state(label_tensor, pred_tensor)
            self.dice_meter.update_state(label_tensor, pred_tensor)
            test_p, test_r = precision_recall(label_tensor, pred_tensor)
            test_dice = dice_coef(label_tensor, pred_tensor)

            self.logger.info(
                f'[Test {test_idx + 1}] Precision = {test_p:.4f}, Recall = {test_r:.4f}, Dice = {test_dice:.4f}.'
            )

            if self.export:
                save_image_nii(
                    image_tensor, affine_tensor,
                    os.path.join(self.result_root,
                                 f'{step}_{test_idx}_img.nii'))
                save_pred_nii(
                    pred_tensor, affine_tensor,
                    os.path.join(self.result_root,
                                 f'{step}_{test_idx}_pred.nii'))
                save_label_nii(
                    label_tensor, affine_tensor,
                    os.path.join(self.result_root,
                                 f'{step}_{test_idx}_gt.nii'))

        test_p, test_r = self.pr_meter.result()
        test_dice = self.dice_meter.result()
        self.logger.info(
            f'[Total Average] Precision = {test_p:.4f}, Recall = {test_r:.4f}, Dice = {test_dice:.4f}.'
        )
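
decompose_vol2cube and compose_prob_cube2vol are project-specific helpers for patch-wise 3D inference: the volume is tiled into fixed-size cubes, the model is run on each cube, and the per-cube predictions are stitched back into a full volume. A rough NumPy sketch of that idea, assuming non-overlapping cubes whose size divides each volume dimension (the real helpers also handle padding, batching and channels), is:

import numpy as np


def split_into_cubes(volume, cube_size):
    """Tile a (D, H, W) volume into non-overlapping cubes and remember their positions."""
    d, h, w = volume.shape
    cubes, coords = [], []
    for z in range(0, d, cube_size):
        for y in range(0, h, cube_size):
            for x in range(0, w, cube_size):
                cubes.append(volume[z:z + cube_size, y:y + cube_size, x:x + cube_size])
                coords.append((z, y, x))
    return cubes, coords


def stitch_cubes(pred_cubes, coords, vol_shape, cube_size, n_classes):
    """Place per-cube class probabilities back at their original positions."""
    out = np.zeros(tuple(vol_shape) + (n_classes,), dtype=np.float32)
    for cube, (z, y, x) in zip(pred_cubes, coords):
        out[z:z + cube_size, y:y + cube_size, x:x + cube_size, :] = cube
    return out
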
Example #4
def validate(args, val_loader, model, criterion):
    losses = AverageMeter()
    ious = AverageMeter()
    dices_1s = AverageMeter()
    dices_2s = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        for i, (input, target) in tqdm(enumerate(val_loader),
                                       total=len(val_loader)):
            input = input.cuda()
            target = target.cuda()

            # compute output
            if args.deepsupervision:
                outputs = model(input)
                loss = 0
                for output in outputs:
                    loss += criterion(output, target)
                loss /= len(outputs)
                iou = iou_score(outputs[-1], target)
                dice_1, dice_2 = dice_coef(outputs[-1], target)[:2]
            else:
                output = model(input)
                loss = criterion(output, target)
                iou = iou_score(output, target)
                dice_1, dice_2 = dice_coef(output, target)[:2]

            losses.update(loss.item(), input.size(0))
            ious.update(iou, input.size(0))
            dices_1s.update(float(dice_1), input.size(0))
            dices_2s.update(float(dice_2), input.size(0))

    log = OrderedDict([('loss', losses.avg), ('iou', ious.avg),
                       ('dice_1', dices_1s.avg), ('dice_2', dices_2s.avg)])

    return log
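
train() from Example #1 and validate() from this example return the same OrderedDict of averaged metrics, so a typical outer loop only has to compare the validation Dice to decide when to checkpoint. A minimal sketch, assuming an args.epochs field and the usual torch.save checkpointing (neither is shown in the snippets), is:

best_dice = 0.0
for epoch in range(args.epochs):  # args.epochs is assumed here
    train_log = train(args, train_loader, model, criterion, optimizer, epoch)
    val_log = validate(args, val_loader, model, criterion)
    print('epoch %d: train loss %.4f, val loss %.4f, val dice_1 %.4f'
          % (epoch, train_log['loss'], val_log['loss'], val_log['dice_1']))
    if val_log['dice_1'] > best_dice:
        best_dice = val_log['dice_1']
        torch.save(model.state_dict(), 'model_best.pth')
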
Example #5
    def run(self):

        self.logger.info('Testing started.')
        test_loader = self.dataset.get_loader(training=False)

        step = self.test_steps
        self.load_model(step)
        self.logger.info(f'[Step {step:d}] Model Loaded.')

        for test_idx, (image_tensor, label_tensor,
                       affine_tensor) in enumerate(test_loader):

            logit_tensor = self.chop_forward(image_tensor, label_tensor)
            pred_tensor = tf.nn.softmax(logit_tensor)[..., 0:1]

            print(
                f'positive predictions:{tf.math.count_nonzero(pred_tensor > 0.5).numpy()}.'
            )

            self.pr_meter.update_state(label_tensor, pred_tensor)
            self.dice_meter.update_state(label_tensor, pred_tensor)
            test_p, test_r = precision_recall(label_tensor, pred_tensor)
            test_dice = dice_coef(label_tensor, pred_tensor)

            if self.export:

                save_image_nii(
                    image_tensor, affine_tensor,
                    os.path.join(self.result_root,
                                 f'{step}_{test_idx + 1}_img.nii'))
                save_pred_nii(
                    pred_tensor, affine_tensor,
                    os.path.join(self.result_root,
                                 f'{step}_{test_idx + 1}_pred.nii'))
                save_label_nii(
                    label_tensor, affine_tensor,
                    os.path.join(self.result_root,
                                 f'{step}_{test_idx + 1}_gt.nii'))

            self.logger.info(
                f'[Test {test_idx + 1}] Precision = {test_p:.4f}, Recall = {test_r:.4f}, Dice = {test_dice:.4f}.'
            )

        test_p, test_r = self.pr_meter.result()
        test_dice = self.dice_meter.result()
        self.logger.info(
            f'[Total Average] Precision = {test_p:.4f}, Recall = {test_r:.4f}, Dice = {test_dice:.4f}.'
        )
Example #6
def evaluate_single_epoch(config, model, dataloader, criterion, log_val, epoch):
    batch_time = AverageMeter()
    losses = AverageMeter()
    scores = AverageMeter()
    score_1 = AverageMeter()
    score_2 = AverageMeter()
    score_3 = AverageMeter()
    score_4 = AverageMeter()

    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (images, labels) in enumerate(dataloader):
            images = images.to(device)
            labels = labels.to(device)

            # the model returns a dict of heads (torchvision-style segmentation model); use the main 'out' output
            logits = model(images)['out']

            loss = criterion(logits, labels)
            losses.update(loss.item(), images.shape[0])

            preds = torch.sigmoid(logits)

            score = dice_coef(preds, labels)
            score_1.update(score[0].item(), images.shape[0])
            score_2.update(score[1].item(), images.shape[0])
            score_3.update(score[2].item(), images.shape[0])
            score_4.update(score[3].item(), images.shape[0])
            scores.update(score.mean().item(), images.shape[0])

            batch_time.update(time.time() - end)
            end = time.time()

            if i % config.PRINT_EVERY == 0:
                print('[%2d/%2d] time: %.2f, loss: %.6f, score: %.4f [%.4f, %.4f, %.4f, %.4f]'
                      % (i, len(dataloader), batch_time.sum, loss.item(), score.mean().item(), score[0].item(), score[1].item(), score[2].item(), score[3].item()))

            del images, labels, logits, preds
            torch.cuda.empty_cache()

        log_val.write('[%d/%d] loss: %.6f, score: %.4f [%.4f, %.4f, %.4f, %.4f]\n'
                      % (epoch, config.TRAIN.NUM_EPOCHS, losses.avg, scores.avg, score_1.avg, score_2.avg, score_3.avg, score_4.avg))
        print('average loss over VAL epoch: %f' % losses.avg)

    return scores.avg, losses.avg
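
train_single_epoch (Example #2) and evaluate_single_epoch read settings such as config.LABEL_SMOOTHING, config.PRINT_EVERY and config.TRAIN.NUM_EPOCHS from a config object. A minimal stand-in that makes the snippets runnable (the original project most likely loads these from a YAML/yacs-style configuration) is:

from types import SimpleNamespace

config = SimpleNamespace(
    LABEL_SMOOTHING=False,   # whether to apply LabelSmoother during training
    PRINT_EVERY=10,          # log every N batches
    TRAIN=SimpleNamespace(NUM_EPOCHS=50),
)
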
Example #7
    def build_model(self):
        """
        Creates model

        :return:
        """
        """
        Helper Variables
        """
        self.global_step_tensor = tf.Variable(0,
                                              trainable=False,
                                              name='global_step')
        self.global_step_inc = self.global_step_tensor.assign(
            self.global_step_tensor + 1)
        self.global_epoch_tensor = tf.Variable(0,
                                               trainable=False,
                                               name='global_epoch')
        self.global_epoch_inc = self.global_epoch_tensor.assign(
            self.global_epoch_tensor + 1)
        """
        Inputs to the network
        """
        with tf.variable_scope('inputs'):
            self.x, self.y = self.data_loader.get_inputs()

            assert self.x.get_shape().as_list() == [
                None, self.config.image_size, self.config.image_size, 1
            ]

            self.is_training = tf.placeholder(tf.bool, name='Training_flag')

        tf.add_to_collection('inputs', self.x)
        tf.add_to_collection('inputs', self.y)
        tf.add_to_collection('inputs', self.is_training)
        """
        Network Architecture
        """
        out = self.x

        with tf.variable_scope('network'):
            """
            Encoder
            """
            out = self.conv_bn_relu(out,
                                    64,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='conv1_1')
            conv1 = self.conv_bn_relu(out,
                                      64,
                                      self.is_training,
                                      self.config.use_batch_norm,
                                      self.config.use_activation,
                                      name='conv1_2')
            pool1, pool1_ind = self.pool(conv1, name='pool1')

            out = self.conv_bn_relu(pool1,
                                    128,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='conv2_1')
            out = self.conv_bn_relu(out,
                                    128,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='conv2_2')
            pool2, pool2_ind = self.pool(out, name='pool2')

            out = self.conv_bn_relu(pool2,
                                    256,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='conv3_1')
            out = self.conv_bn_relu(out,
                                    256,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='conv3_2')
            pool3, pool3_ind = self.pool(out, name='pool3')

            out = self.conv_bn_relu(pool3,
                                    512,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='conv4_1')
            out = self.conv_bn_relu(out,
                                    512,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='conv4_2')
            pool4, pool4_ind = self.pool(out, name='pool4')
            """
            Bottleneck
            """
            out = self.conv_bn_relu(pool4,
                                    512,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='conv5_1')
            out = self.conv_bn_relu(out,
                                    512,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='conv5_2')
            """
            Decoder
            """
            out = self.unpool(out, pool4_ind, name='unpool4')
            out = self.conv_bn_relu(out,
                                    512,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='upconv4_2')
            out = self.conv_bn_relu(out,
                                    256,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='upconv4_1')

            out = self.unpool(out, pool3_ind, name='unpool3')
            out = self.conv_bn_relu(out,
                                    256,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='upconv3_2')
            out = self.conv_bn_relu(out,
                                    128,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='upconv3_1')

            out = self.unpool(out, pool2_ind, name='unpool2')
            out = self.conv_bn_relu(out,
                                    128,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='upconv2_2')
            out = self.conv_bn_relu(out,
                                    64,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='upconv2_1')

            out = self.unpool(out, pool1_ind, name='unpool1')
            # Skipped connection
            out = tf.concat([out, conv1], axis=-1, name='skipped')
            # ------------------
            out = self.conv_bn_relu(out,
                                    64,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    kernel_size=(1, 1),
                                    name='upconv1_2')
            out = self.conv_bn_relu(out,
                                    64,
                                    self.is_training,
                                    self.config.use_batch_norm,
                                    self.config.use_activation,
                                    name='upconv1_1')

            self.out = self.conv_predictor(out, use_activation=False)

            tf.add_to_collection('out', self.out)
        """
        Some operators for the training process
        """
        with tf.variable_scope('predictions'):
            self.predictions = tf.nn.sigmoid(self.out, name='pred')
            tf.add_to_collection('predictions', self.predictions)

        with tf.variable_scope('metrics'):
            self.loss = mt.dice_loss(y_true=self.y, y_pred=self.predictions)
            self.dice = mt.dice_coef(y_true=self.y, y_pred=self.predictions)
            self.iou = mt.mean_iou(y_true=self.y, y_pred=self.predictions)

        with tf.variable_scope('train_step'):
            if self.config.optimizer == 'Adam':
                self.optimizer = tf.train.AdamOptimizer(
                    learning_rate=self.config.learning_rate)
            elif self.config.optimizer == 'Momentum':
                self.optimizer = tf.train.MomentumOptimizer(
                    learning_rate=self.config.learning_rate,
                    momentum=self.config.momentum)
            else:
                self.optimizer = tf.train.AdamOptimizer(
                    learning_rate=self.config.learning_rate)

            if self.config.use_batch_norm:
                update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
                with tf.control_dependencies(update_ops):
                    self.train_step = self.optimizer.minimize(
                        self.loss, global_step=self.global_step_tensor)
            else:
                self.train_step = self.optimizer.minimize(
                    self.loss, global_step=self.global_step_tensor)

        tf.add_to_collection('train', self.train_step)
        tf.add_to_collection('train', self.loss)
        tf.add_to_collection('train', self.dice)
        tf.add_to_collection('train', self.iou)
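
build_model only constructs the TF1 graph; nothing in it runs a session. A rough sketch of how such a graph is usually driven, assuming the inputs come from the data_loader pipeline wired into self.x and self.y, and omitting iterator initialization, checkpointing and summaries, is:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(num_steps):  # num_steps is assumed here
        # is_training toggles batch-norm behaviour inside conv_bn_relu
        _, loss_val, dice_val = sess.run(
            [model.train_step, model.loss, model.dice],
            feed_dict={model.is_training: True})
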
Example #8
    for step, data in enumerate(train_batch):
        image, label, patient_id, file_id = (data["image"], data["label"],
                                             data["patient_id"], data["file_id"])
        optimizer.zero_grad()
        x = image.cuda().float()
        y_ = label.cuda().float()

        #  predict
        y = model(x)
        loss = loss_func(y, y_)
        loss.backward()
        optimizer.step()

        cur_iter += 1

        dice_score = dice_coef(y, y_)

        n_batch_samples = int(image.size()[0])
        n_samples += n_batch_samples
        running_loss += loss.item() * n_batch_samples
        running_dice += dice_score.item() * n_batch_samples

        if (step == 0) or (step + 1) % 100 == 0:
            print('     > Step [%3d/%3d] Loss %.4f - Dice Coef %.4f' %
                  (step + 1, len(train_batch), running_loss / n_samples,
                   running_dice / n_samples))

    train_loss = running_loss / n_samples
    train_dice = running_dice / n_samples

    # ----------------------------- Validation Dataset -----------------------------
Example #9
def val_step(val_loader,
             model,
             criterion,
             segmentation_problem=False,
             masks_overlays=0,
             overlays_path="overlays",
             selected_class="",
             epoch=-1,
             lr=0):
    model.eval()
    if not segmentation_problem:
        val_loss, correct, total = 0, 0, 0
        y_true, y_pred = [], []
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(val_loader):
                inputs, targets = inputs.cuda(), targets.cuda()
                outputs = model(inputs)
                targets = targets.squeeze().type_as(outputs)
                loss = criterion(outputs, targets)

                val_loss += loss.item()

                y_true.extend(targets.tolist())
                y_pred.extend(outputs.tolist())

            val_loss = (val_loss / (batch_idx + 1))

            y_true = np.stack(y_true)  # (batch, classes)
            y_pred = np.stack(y_pred)  # (batch, classes)

            auc_per_class = []
            for c in range(y_true.shape[1]):
                auc_per_class.append(
                    metrics.roc_auc_score(y_true[:, c], y_pred[:, c]))

        # The trailing None keeps the return shape similar to the segmentation branch
        return val_loss, auc_per_class, None

    else:  # Segmentation problem
        val_loss, val_iou, val_dice, generated_masks = 0, [], [], 0
        with torch.no_grad():
            for batch_idx, (inputs, _, masks, original_imgs, original_masks,
                            inputs_names) in enumerate(val_loader):
                inputs, masks = inputs.cuda(), masks.cuda()
                outputs = model(inputs)
                loss = criterion(outputs, masks)

                val_loss += loss.item()

                for indx, single_pred in enumerate(outputs):
                    original_mask = original_masks[indx]
                    original_h, original_w = original_mask.shape
                    resize_transform = albumentations.Resize(
                        original_h, original_w)
                    prob_mask = torch.sigmoid(single_pred).squeeze(0).cpu().numpy()
                    pred_mask = resize_transform(image=prob_mask)["image"]
                    binary_ground_truth = np.where(original_mask > 0.5, 1,
                                                   0).astype(np.int32)
                    binary_pred_mask = np.where(pred_mask > 0.5, 1,
                                                0).astype(np.int32)

                    tmp_iou = jaccard_coef(binary_ground_truth,
                                           binary_pred_mask)
                    val_iou.append(tmp_iou)
                    tmp_dice = dice_coef(binary_ground_truth, binary_pred_mask)
                    val_dice.append(tmp_dice)

                    if generated_masks < masks_overlays:
                        save_overlays(
                            original_imgs[indx], binary_ground_truth,
                            binary_pred_mask,
                            os.path.join(
                                overlays_path, selected_class, f"{lr}",
                                f"epoch{epoch}",
                                f"{inputs_names[indx].split('/')[-1]}"))
                        generated_masks += 1

            val_loss = (val_loss / (batch_idx + 1))
        # The None placeholders keep the return shape similar to the classification branch
        return (val_loss, np.array(val_iou).mean(), None, None, None,
                np.array(val_dice).mean())
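
Unlike the earlier examples, this one computes dice_coef and jaccard_coef on thresholded NumPy masks rather than on tensors. A minimal sketch of such binary-mask metrics (the project's own implementations may add smoothing terms or special-case empty masks differently) is:

import numpy as np


def dice_coef(y_true, y_pred, eps=1e-7):
    """Dice score between two binary masks of the same shape."""
    intersection = np.logical_and(y_true, y_pred).sum()
    return (2.0 * intersection + eps) / (y_true.sum() + y_pred.sum() + eps)


def jaccard_coef(y_true, y_pred, eps=1e-7):
    """IoU (Jaccard index) between two binary masks of the same shape."""
    intersection = np.logical_and(y_true, y_pred).sum()
    union = np.logical_or(y_true, y_pred).sum()
    return (intersection + eps) / (union + eps)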