Example #1
def calc_loss(pred, target, metrics, phase='train', bce_weight=0.5):
    bce = F.binary_cross_entropy_with_logits(pred, target)
    pred = torch.sigmoid(pred)

    # converting the tensors to numpy below detaches them from the computational graph
    if phase == 'test':
        pred = (pred > 0.50).float()  # a threshold of 0.55 is slightly better
        dice = dice_loss(pred, target)
        jaccard_loss = metric_jaccard(pred, target)
        loss = bce * bce_weight + dice * (1 - bce_weight)

        metrics['bce'] = bce.data.cpu().numpy() * target.size(0)
        metrics['loss'] = loss.data.cpu().numpy() * target.size(0)
        metrics['dice'] = (1 - dice.data.cpu().numpy()) * target.size(0)
        metrics['jaccard'] = (1 - jaccard_loss.data.cpu().numpy()) * target.size(0)
    else:
        dice = dice_loss(pred, target)
        jaccard_loss = metric_jaccard(pred, target)
        loss = bce * bce_weight + dice * (1 - bce_weight)
        metrics['bce'] += bce.data.cpu().numpy() * target.size(0)
        metrics['loss'] += loss.data.cpu().numpy() * target.size(0)
        metrics['dice_loss'] += dice.data.cpu().numpy() * target.size(0)
        metrics['jaccard_loss'] += jaccard_loss.data.cpu().numpy(
        ) * target.size(0)

    return loss
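
Note: dice_loss itself is not defined in any of these snippets. A minimal soft-Dice sketch of the formulation they appear to assume is shown below (the smoothing term, tensor layout, and mean reduction are assumptions, not the original helper):

def dice_loss(pred, target, smooth=1e-6):
    # pred: sigmoid probabilities, target: binary mask, both shaped (N, C, H, W)
    intersection = (pred * target).sum(dim=(2, 3))
    union = pred.sum(dim=(2, 3)) + target.sum(dim=(2, 3))
    dice = (2.0 * intersection + smooth) / (union + smooth)
    return 1.0 - dice.mean()  # loss = 1 - mean soft Dice coefficient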
Example #2
def calc_loss_paral(pred_hr,
                    target_hr,
                    pred_vhr_lab,
                    target_vhr_lab,
                    pred_vhr_unlab,
                    target_vhr_unlab_up,
                    metrics,
                    weight_loss=0.1,
                    bce_weight=0.5):

    #loss for HR model
    bce_l1 = F.binary_cross_entropy_with_logits(pred_hr, target_hr)
    pred_hr = torch.sigmoid(pred_hr)

    dice_l1 = dice_loss(pred_hr, target_hr)
    loss_l1 = bce_l1 * bce_weight + dice_l1 * (1 - bce_weight)

    #loss for VHR model, labelled data
    bce_l2 = F.binary_cross_entropy_with_logits(pred_vhr_lab, target_vhr_lab)
    pred_vhr_lab = torch.sigmoid(pred_vhr_lab)

    dice_l2 = dice_loss(pred_vhr_lab, target_vhr_lab)
    loss_l2 = bce_l2 * bce_weight + dice_l2 * (1 - bce_weight)

    #loss for VHR model, unlabelled data
    bce_l3 = F.binary_cross_entropy_with_logits(pred_vhr_unlab,
                                                target_vhr_unlab_up)
    pred_vhr_unlab = torch.sigmoid(pred_vhr_unlab)
    dice_l3 = dice_loss(pred_vhr_unlab, target_vhr_unlab_up)
    loss_l3 = bce_l3 * bce_weight + dice_l3 * (1 - bce_weight)

    #loss for full-network
    loss = (loss_l1 + loss_l2 + loss_l3 * weight_loss)

    metrics['loss_HR'] += loss_l1.data.cpu().numpy() * target_hr.size(0)
    metrics['loss_VHR_lab'] += loss_l2.data.cpu().numpy() * target_vhr_lab.size(0)
    metrics['loss_VHR_unlab'] += loss_l3.data.cpu().numpy() * target_vhr_unlab_up.size(0)
    metrics['loss'] += loss.data.cpu().numpy() * (
        target_vhr_lab.size(0) + target_vhr_unlab_up.size(0))
    metrics['loss_dice_lb'] += dice_l2.data.cpu().numpy() * target_vhr_lab.size(0)
    metrics['loss_dice_unlab'] += dice_l3.data.cpu().numpy() * target_vhr_unlab_up.size(0)  # change to hr_label

    return loss
Example #3
def calc_loss(pred, target, metrics, bce_weight=0.5):
    bce = F.binary_cross_entropy_with_logits(pred, target)

    pred = torch.sigmoid(pred)

    dice = dice_loss(pred, target)

    loss = bce * bce_weight + dice * (1 - bce_weight)

    pred_flat = pred.view(-1).data.cpu().numpy()
    target_flat = target.view(-1).data.cpu().numpy()

    # pred_flat[pred_flat>=0.5]=1
    # pred_flat[pred_flat < 0.5] = 0

    #pred_flat = pred_flat//255
    # acc = np.sum(pred_flat==target_flat)/pred_flat.shape[0]
    # f1score = f1_score(pred_flat,target_flat)
    #pixel_acc = torch.true_divide(torch.sum(pred_flat==target.view(-1)),pred.view(-1).shape[0])

    metrics['bce'] += bce.data.cpu().numpy() * target.size(0)
    metrics['dice'] += dice.data.cpu().numpy() * target.size(0)
    metrics['loss'] += loss.data.cpu().numpy() * target.size(0)
    #metrics['pixel_acc'] = acc
    #metrics['f1_score'] = f1score

    return loss
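
A sketch of how the accumulated metrics dict is typically consumed at the end of an epoch (the defaultdict bookkeeping and the model/loader names are assumptions):

from collections import defaultdict

metrics = defaultdict(float)
epoch_samples = 0
for inputs, labels in dataloader:  # hypothetical DataLoader
    preds = model(inputs)
    loss = calc_loss(preds, labels, metrics)
    epoch_samples += labels.size(0)
# each entry was accumulated as value * batch_size, so divide by the sample count
print({k: v / epoch_samples for k, v in metrics.items()})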
Example #4
    def __init__(self, config, train_loader, valid_loader, test_loader):

        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.test_loader = test_loader

        self.net = None
        self.optimizer = None

        self.criterion = dice_loss(scale=1)
        #self.criterion = torch.nn.BCELoss()

        self.lr = config.lr
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        if config.train_dataset == 'african':
            self.img_size = (640, 640)
        elif config.train_dataset == 'asian':
            self.img_size = (640, 480)
        elif config.train_dataset == 'mobile':
            self.img_size = (384, 384)

        self.num_epochs = config.num_epochs
        self.batch_size = config.batch_size
        self.train_dataset = config.train_dataset
        self.model_path = config.model_path
        self.result_path = config.result_path + '/' + config.train_dataset
        self.mode = config.mode

        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')

        self.best_mIoU = 0
        self.testdata_augmentation = T.Compose([T.ToTensor()])
        self.build_model()
Example #5
def run_cv(img_size, pre_trained):
    image_files = get_img_files()
    kf = KFold(n_splits=N_CV, random_state=RANDOM_STATE, shuffle=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    for n, (train_idx, val_idx) in enumerate(kf.split(image_files)):
        train_files = image_files[train_idx]
        val_files = image_files[val_idx]

        writer = SummaryWriter()

        def on_after_epoch(m, df_hist):
            save_best_model(n, m, df_hist)
            write_on_board(writer, df_hist)
            log_hist(df_hist)

        criterion = dice_loss(scale=2)
        data_loaders = get_data_loaders(train_files, val_files, img_size)
        trainer = Trainer(data_loaders, criterion, device, on_after_epoch)

        model = MobileNetV2_unet(pre_trained=pre_trained)
        model.to(device)
        optimizer = Adam(model.parameters(), lr=LR)

        hist = trainer.train(model, optimizer, num_epochs=N_EPOCHS)
        hist.to_csv('{}/{}-hist.csv'.format(OUT_DIR, n), index=False)

        writer.close()
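        # note: the break below stops training after the first CV fold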

        break
Example #6
def train():
    json_file = open("parameters.json")
    parameters = json.load(json_file)
    json_file.close()
    net = VNet()
    net = torch.nn.DataParallel(net, device_ids=[0, 1, 2, 3])
    net.cuda()
    cudnn.benchmark = True

    optimizer = torch.optim.Adam(net.parameters(), lr=parameters["lr"])
    criterion = nn.BCELoss()
    promise_dataset = PromiseDataset(is_train=True)
    train_loader = torch.utils.data.DataLoader(dataset=promise_dataset, batch_size=parameters["batch_size"])
    for epoch in range(parameters["num_epochs"]):
        net.train()
        for i, (data, label) in enumerate(train_loader):
            data, label = data.cuda(), label.cuda()  # label must be on the same device as the output
            optimizer.zero_grad()
            output = net(data)
            loss = dice_loss(output, label)
            loss.backward()
            optimizer.step()

        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, parameters["num_epochs"], loss.item()))

    torch.save(net.state_dict(), "weights/promise12_weight.pth")
Example #7
def grad(model, inputs, targets):
    with tf.GradientTape() as t:
        model_output = model(inputs)
        current_dice_loss = loss_lib.dice_loss(targets[0], model_output[0])
        current_bboxes_loss = loss_lib.masked_mae_loss(targets[1], model_output[1])
        total_loss = current_dice_loss + current_bboxes_loss
        total_grad = t.gradient(total_loss, model.trainable_variables)
    return current_dice_loss, current_bboxes_loss, total_loss, total_grad
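
A possible training-step usage of grad() (the optimizer and learning rate are assumptions):

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
dice_l, bbox_l, total_l, grads = grad(model, inputs, targets)
optimizer.apply_gradients(zip(grads, model.trainable_variables))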
Example #8
def train_one_epoch(model, optimizer, data_loader, device, epoch, num_epochs, tb_writer):
    model.train()
    losses = np.array([])
    metrics = np.array([])
    bi0 = epoch * len(data_loader)  # batch index

    logger.info(('\n' + '%10s' * 2) % ('Epoch', 'loss'))
    pbar = tqdm(enumerate(data_loader), total=len(data_loader))
    s = ('%10s' + '%10.4f') % (
        '-/%g' % (num_epochs - 1), 0.0)
    pbar.set_description(s)
    for i, (imgs, targets) in pbar:
        imgs, targets = imgs.to(device), targets.to(device)
        if opt.model == 'deeplabv3':
            preds = model(imgs)['out']
            targets = targets.long()
        elif opt.model == 'hrnet_w18' or opt.model == 'hrnet_w48':
            preds = model(imgs)
            h, w = preds.shape[2], preds.shape[3]
            targets = F.interpolate(targets.float(), size=(h, w), mode='nearest').long()
        elif opt.model == 'dilated_unet':
            preds = model(imgs)
            targets = targets.long()

        if opt.loss == 'ce':
            loss = ce_loss(targets, preds)
        elif opt.loss == 'dice':
            loss = dice_loss(targets, preds)
        elif opt.loss == 'jaccard':
            loss = jaccard_loss(targets, preds)
        else:
            logger.critical('unsupported loss function')
            exit(1)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        with torch.no_grad():
            # cv2_imshow(imgs[0], preds[0])
            losses = np.append(losses, loss.item())
            # metrics = np.append(metrics, metric)
            # avg_loss = moving_avg(losses, 10)
            # avg_metric = moving_avg(metrics, 10)
            # print('[{:d}]{:d}/{:d}: loss={:4f}, dice={:4f}'.format(epoch, i + 1, len(data_loader),
            #                                                        avg_loss, avg_metric))
            # logger.info('[{:d}]{:d}/{:d}: dice loss={:4f}'.format(epoch, i + 1, len(data_loader), loss.item()))
            s = ('%10s' + '%10.4f') % (
                '%g/%g' % (epoch, num_epochs - 1), loss.item())
            pbar.set_description(s)
            bi = bi0 + i
            tb_writer.add_scalar('train_batch_loss', loss.item(), bi)

    epoch_loss = losses.mean()
    # epoch_recent_metric = moving_avg(metrics, 10)

    return epoch_loss
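
A possible outer epoch loop around train_one_epoch (the writer, loader, and hyper-parameter names are assumptions):

from torch.utils.tensorboard import SummaryWriter

tb_writer = SummaryWriter()
for epoch in range(num_epochs):
    epoch_loss = train_one_epoch(model, optimizer, train_loader, device,
                                 epoch, num_epochs, tb_writer)
    tb_writer.add_scalar('train_epoch_loss', epoch_loss, epoch)
tb_writer.close()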
Example #9
def calc_loss(pred, target, metrics, bce_weight=0.5):
    bce = F.binary_cross_entropy_with_logits(pred.float(), target.float())

    pred = torch.sigmoid(pred)
    dice = dice_loss(pred, target)

    loss = bce * bce_weight + dice * (1 - bce_weight)

    metrics['bce'] += bce.data.cpu().numpy() * target.size(0)
    metrics['dice'] += dice.data.cpu().numpy() * target.size(0)
    metrics['loss'] += loss.data.cpu().numpy() * target.size(0)

    return loss
Example #10
def calc_loss(pred, target, metrics, bce_weight=0.5):
    bce = F.binary_cross_entropy_with_logits(pred, target)

    pred = torch.sigmoid(pred)
    dice = dice_loss(pred, target)
    iou_score = iou(pred, target)
    aed_score = aed(pred, target)

    loss = bce * bce_weight + dice * (1 - bce_weight)

    metrics['iou'] += iou_score.sum()
    metrics['aed'] += aed_score.sum()
    metrics['bce'] += bce.data.cpu().numpy() * target.size(0)
    metrics['dice'] += dice.data.cpu().numpy() * target.size(0)
    metrics['loss'] += loss.data.cpu().numpy() * target.size(0)

    return loss
Example #11
def calc_loss(pred, target, metrics, bce_weight=0.5):
    
    pred = pred.type('torch.FloatTensor')
    target = target.type('torch.FloatTensor')

    bce = F.binary_cross_entropy_with_logits(pred, target)

    pred = torch.sigmoid(pred)  # F.sigmoid is deprecated in favour of torch.sigmoid
    dice = dice_loss(pred, target)
    gc_loss = cal_gc(pred, target)
    loss = bce * bce_weight + dice * (1 - bce_weight) + gc_loss
    
    metrics['bce'] += bce.data.cpu().numpy() * target.size(0)
    metrics['dice'] += dice.data.cpu().numpy() * target.size(0)
    metrics['gc_loss'] += gc_loss.data.cpu().numpy() * target.size(0)
    metrics['loss'] += loss.data.cpu().numpy() * target.size(0)
    
    return loss
Example #12
def calc_loss(pred, target, metrics, bce_weight=0.5):
    """
	Validate F.binary_cross_entropy_with_logits(pred, target, reduction='none').mean() and 
	F.binary_cross_entropy_with_logits(pred, target, reduction='mean') are same:
	Rsult : Validated binary_cross_entropy_with_logits_test.py
	"""
    bce = F.binary_cross_entropy_with_logits(pred, target, reduction='mean')

    pred = torch.sigmoid(pred)
    dice = dice_loss(pred, target)

    loss = bce * bce_weight + dice * (1 - bce_weight)

    metrics['bce'] += bce.data.cpu().numpy() * target.size(0)
    metrics['dice'] += dice.data.cpu().numpy() * target.size(0)
    metrics['loss'] += loss.data.cpu().numpy() * target.size(0)

    return loss
Example #13
    def __init__(self,
                 model_root_channel=8,
                 img_size=256,
                 batch_size=20,
                 n_channel=1,
                 n_class=2):

        self.drop_rate = tf.placeholder(tf.float32)
        self.training = tf.placeholder(tf.bool)

        self.batch_size = batch_size
        self.model_channel = model_root_channel

        self.X = tf.placeholder(tf.float32,
                                [None, img_size, img_size, n_channel],
                                name='X')
        self.Y = tf.placeholder(tf.float32,
                                [None, img_size, img_size, n_class],
                                name='Y')

        self.logits = self.neural_net()
        self.foreground_predicted, self.background_predicted = tf.split(
            self.logits, [1, 1], 3)
        self.foreground_truth, self.background_truth = tf.split(
            self.Y, [1, 1], 3)

        # # Cross_Entropy
        # self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.Y))

        self.loss = loss.dice_loss(output=self.logits, target=self.Y)

        # self.accuracy = layer.iou_coe(output=self.logits, target=self.Y)
        with tf.name_scope('Metrics'):
            self.accuracy = layer.mean_iou(self.foreground_predicted,
                                           self.foreground_truth)

        # TB
        tf.summary.scalar('loss', self.loss)
        tf.summary.scalar('accuracy', self.accuracy)
Example #14
    run_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0

save_dir = os.path.join(save_dir_root, 'run')

# Network definition
# if backbone == 'xception':
#     net = deeplab_xception.DeepLabv3_plus(nInputChannels=3, n_classes=12, os=16, pretrained=False)
# elif backbone == 'resnet':
#     net = deeplab_resnet.DeepLabv3_plus(nInputChannels=3, n_classes=12, os=16, pretrained=False)
# else:
#     raise NotImplementedError

net = cenet.CE_Net_backbone_DAC_with_inception(num_classes=1, num_channels=3)

modelName = 'CEnet-' + backbone + '-voc'
criterion_mask = loss.dice_loss()
# criterion_edge = loss.weighted_cross_entropy(num_classes=12)
# criterion = loss.FocalLoss(gamma=2)

print("Backbone network is {}".format(backbone))

if resume_epoch == 0:
    print("Training deeplabv3+ from scratch...")
else:
    print("Initializing weights from: {}...".format(
        os.path.join(save_dir, 'models',
                     modelName + '_epoch-' + str(resume_epoch - 1) + '.pth')))
    net.load_state_dict(
        torch.load(os.path.join(
            save_dir, 'models',
            modelName + '_epoch-' + str(resume_epoch - 1) + '.pth'),
Example #15
    net.train()

    for i, data in enumerate(train_data):

        # wrap data in Variables
        inputs, labels = data
        if cuda: inputs, labels = inputs.cuda(), labels.cuda()
        inputs, labels = Variable(inputs), Variable(labels)

        # forward pass and loss calculation
        outputs = net(inputs)

        # get either dice loss or cross-entropy
        if dice:
            outputs = outputs[:, 1, :, :].unsqueeze(dim=1)
            loss = dice_loss(outputs, labels)
        else:
            labels = labels.squeeze(dim=1)
            loss = criterion(outputs, labels)

        # empty gradients, perform backward pass and update weights
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # save and print statistics
        running_loss += loss.item()  # loss.data[0] is the pre-0.4 PyTorch idiom

    # print statistics
    if dice:
        print('  [epoch %d] - train dice loss: %.3f' %
Example #16
train_data = Dataset_brain_4(file_path)
batch_size = 64
train_loader = data.DataLoader(dataset=train_data,
                               batch_size=batch_size,
                               shuffle=True,
                               drop_last=True,
                               num_workers=4)
unet = Unet(4)
optimizer = torch.optim.Adam(unet.parameters(), lr=0.001)
unet.cuda()
unet.train()
EPOCH = 30
print(EPOCH)
for epoch in range(EPOCH):
    batch_score = 0
    num_batch = 0
    for i, (img, label) in enumerate(train_loader):
        seg = unet(img.float().cuda())
        loss = dice_loss(seg, label.float().cuda())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        seg = seg.cpu()
        seg[seg >= 0.5] = 1.
        seg[seg != 1] = 0.
        batch_score += dice_score(seg, label.float()).data.numpy()
        num_batch += img.size(0)
    batch_score /= num_batch
    print('EPOCH %d : train_score = %.4f' % (epoch, batch_score))
torch.save(unet.state_dict(), model_save)
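
dice_score is not defined in this snippet; a plausible sketch that returns the summed Dice over the batch, so that dividing by num_batch above gives a per-sample average (the smoothing term is an assumption):

def dice_score(seg, label, smooth=1e-6):
    # flatten each sample and compute a hard Dice coefficient per sample
    seg = seg.view(seg.size(0), -1)
    label = label.view(label.size(0), -1)
    intersection = (seg * label).sum(dim=1)
    dice = (2.0 * intersection + smooth) / (seg.sum(dim=1) + label.sum(dim=1) + smooth)
    return dice.sum()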
Example #17
        out_src, _ = discriminator(x_hat, t2.float().cuda())
        d_loss_gp = gradient_penalty(out_src, x_hat)
        #         d_loss_gp.backward(retain_graph=True)
        d_loss = d_loss_real + d_loss_fake + LAMBDA_CLS * d_loss_cls + LAMBDA_GP * d_loss_gp
        optimizer_d.zero_grad()
        d_loss.backward()
        optimizer_d.step()
        ############################################## generator
        fake = generator(t2.float().cuda(), label)
        out_src, out_cls = discriminator(fake, t2.float().cuda())
        g_loss_fake = -torch.mean(out_src.sum([1, 2, 3]))
        g_loss_cls = classification_loss(out_cls, label)
        g_loss_rec = torch.mean(
            torch.abs(real.float().cuda() - fake).sum([1, 2, 3]))
        pred = unet(fake, label)
        g_loss_seg = dice_loss(pred, seg.float().cuda())
        g_loss = g_loss_fake + LAMBDA_CLS * g_loss_cls + g_loss_rec * LAMBDA_REC + g_loss_seg * LAMBDA_SEG
        optimizer_g.zero_grad()
        g_loss.backward()
        optimizer_g.step()
        ############################################## segmentor
        fake = generator(t2.float().cuda(), label)
        pred = unet(fake.detach(), label)
        pred_2 = unet(real.float().cuda(), label)
        s_loss = dice_loss(pred, seg.float().cuda())
        s_loss_2 = dice_loss(pred_2, seg.float().cuda())
        s_loss = 0.7 * s_loss + 0.3 * s_loss_2
        optimizer_s.zero_grad()
        s_loss.backward()
        optimizer_s.step()
Example #18
            loss_ = 0.
            tm_ = 0.
            tj_ = 0.
            th_ = 0.
            acc_ = 0.
            
            progress.set_description('Epoch: %s' % str(ep+1))

            for idx, batch_data in enumerate(train):
                X, y_mask, y_joint, y_height = batch_data['img'].cuda(), batch_data['mask'].cuda(), batch_data['joint'].cuda(), batch_data['height'].cuda()

                optimizer.zero_grad()
                
                mask_o, joint_o, height_o = net(X)
                
                loss_m = w1_loss * (dice_loss(mask_o, y_mask, 0) + dice_loss(mask_o, y_mask, 1))/2
                loss_j = w2_loss * nn.CrossEntropyLoss()(joint_o, y_joint)  
                loss_h = w3_loss * height_loss(height_o, y_height)

                loss = loss_h + loss_m + loss_j  
                
                pred = torch.argmax(height_o, 1)
                
                loss.backward()
                optimizer.step()
                
                progress.update(1)
                
                loss_ += loss.item()
                tm_ += loss_m.item()
                tj_ += loss_j.item()