# Shared imports assumed by the snippets below; `amp` is the legacy NVIDIA
# apex mixed-precision API that several of the training loops rely on.
import os
from pathlib import Path

import cv2
import numpy as np
import torch
from apex import amp
from tqdm import tqdm


def train_epoch(current_epoch, seg_loss, ce_loss, seg_seesaw, model, optimizer,
                scheduler, train_data_loader):
    losses = AverageMeter()
    losses1 = AverageMeter()

    dices = AverageMeter()

    iterator = tqdm(train_data_loader)
    model.train()
    for i, sample in enumerate(iterator):
        imgs = sample["img"].cuda(non_blocking=True)
        msks = sample["msk"].cuda(non_blocking=True)
        lbl_msk = sample["lbl_msk"].cuda(non_blocking=True)

        out = model(imgs)

        # loss0 = seg_loss(out[:, 0, ...], msks[:, 0, ...])
        # loss1 = seg_loss(out[:, 1, ...], msks[:, 1, ...])
        # loss2 = seg_loss(out[:, 2, ...], msks[:, 2, ...])
        # loss3 = seg_loss(out[:, 3, ...], msks[:, 3, ...])
        # loss4 = seg_loss(out[:, 4, ...], msks[:, 4, ...])

        loss5 = ce_loss(out, lbl_msk)
        #loss5 = seg_seesaw(out, lbl_msk)
        loss = loss5
        #loss = 0.1 * loss0 + 0.1 * loss1 + 0.3 * loss2 + 0.3 * loss3 + 0.2 * loss4 + loss5 * 2

        with torch.no_grad():
            # dice on the complemented first channel (both prediction and
            # target are inverted before scoring)
            _probs = 1 - torch.sigmoid(out[:, 0, ...])
            dice_sc = 1 - dice_round(_probs, 1 - msks[:, 0, ...])

        losses.update(loss.item(), imgs.size(0))
        losses1.update(loss5.item(), imgs.size(0))

        dices.update(dice_sc, imgs.size(0))

        iterator.set_description(
            "epoch: {}; lr {:.7f}; Loss {loss.val:.4f} ({loss.avg:.4f}); cce_loss {loss1.val:.4f} ({loss1.avg:.4f}); Dice {dice.val:.4f} ({dice.avg:.4f})"
            .format(current_epoch,
                    scheduler.get_lr()[-1],
                    loss=losses,
                    loss1=losses1,
                    dice=dices))

        optimizer.zero_grad()
        # loss.backward()
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), 0.999)
        optimizer.step()

    scheduler.step()

    print(
        "epoch: {}; lr {:.7f}; Loss {loss.avg:.4f}; CCE_loss {loss1.avg:.4f}; Dice {dice.avg:.4f}"
        .format(current_epoch,
                scheduler.get_lr()[-1],
                loss=losses,
                loss1=losses1,
                dice=dices))
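# Every loop in this listing relies on an AverageMeter for running
# statistics. A minimal sketch, assuming the classic pattern from the
# PyTorch ImageNet example:
class AverageMeter:
    """Tracks the most recent value and a running average."""

    def __init__(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count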
Example #2
def validate(net,
             data_loader,
             predictions_dir,
             visualizer_path,
             data_dir,
             truth_csv="oof_gt.txt"):
    os.makedirs(predictions_dir, exist_ok=True)
    dices = []
    with torch.no_grad():
        for sample in tqdm(data_loader):
            imgs = sample["image"].cuda().float()
            mask = sample["mask"].cuda().float()

            output = net(imgs)
            pred = torch.sigmoid(output)
            d = dice_round(pred[:, 10:11, ...], mask[:, 10:11, ...],
                           t=0.5).cpu().numpy()
            for i in range(d.shape[0]):
                dices.append(d[i])
                out_path = os.path.join(
                    predictions_dir,
                    sample["img_name"][i][:-4].replace("MS", "RGB") + ".png")
                cv2.imwrite(out_path,
                            (pred[i, 10].cpu().numpy() * 255).astype(np.uint8))
    pred_csv = os.path.join(
        Path(predictions_dir).parent,
        os.path.basename(predictions_dir) + ".txt")
    vectorize_dir(predictions_dir, pred_csv)
    apls = calculate_visualizer(visualizer_path,
                                truth_csv=truth_csv,
                                pred_path=pred_csv,
                                img_dir=data_dir)

    return np.mean(dices), apls
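# A plausible dice_round helper matching how the validate functions use it
# (hard per-sample dice score after thresholding at t). The train loops
# instead compute `1 - dice_round(...)`, i.e. they assume a loss-style
# variant, so the original repos evidently differ; treat this as a sketch,
# not the canonical definition.
def dice_round(preds, trues, t=0.5, eps=1e-6):
    preds = (preds > t).float()
    p = preds.reshape(preds.size(0), -1)
    g = trues.reshape(trues.size(0), -1)
    intersection = (p * g).sum(dim=1)
    return (2.0 * intersection + eps) / (p.sum(dim=1) + g.sum(dim=1) + eps)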
Example #3
def validate(net, data_loader, predictions_dir, data_dir, visualizer_path):
    os.makedirs(predictions_dir, exist_ok=True)
    preds_dir = os.path.join(predictions_dir, "predictions")
    os.makedirs(preds_dir, exist_ok=True)
    dices = []
    with torch.no_grad():
        for sample in tqdm(data_loader):
            imgs = sample["image"].cuda().float()
            mask = sample["mask"].cuda().float()

            output = net(imgs)
            binary_pred = torch.sigmoid(output)

            for i in range(output.shape[0]):
                # score each sample individually
                d = dice_round(binary_pred[i:i + 1, 0:1, ...],
                               mask[i:i + 1, 0:1, ...], t=0.5).item()
                dices.append(d)
                cv2.imwrite(os.path.join(preds_dir, sample["img_name"][i] + ".png"),
                            (np.moveaxis(binary_pred[i].cpu().numpy(), 0, -1)[..., :3] * 255).astype(np.uint8))
    f_score = calculate_metrics(fold_dir=predictions_dir,
                                visualizer_path=visualizer_path,
                                truth_csv=os.path.join(data_dir,
                                                       "SummaryData/SN6_Train_AOI_11_Rotterdam_Buildings.csv"),
                                img_dir=os.path.join(data_dir, "SAR-Intensity/"),
                                sar_orientations_csv=os.path.join(data_dir, "SummaryData/SAR_orientations.txt"))
    return np.mean(dices), f_score
Example #4
def train_epoch(current_epoch, seg_loss, ce_loss, mse_loss, model, optimizer, scheduler, train_data_loader):
    losses = AverageMeter()
    losses1 = AverageMeter()
    losses2 = AverageMeter()
    losses3 = AverageMeter()
    losses4 = AverageMeter()

    dices = AverageMeter()

    iterator = tqdm(train_data_loader)
    model.train()
    scheduler.step(current_epoch)
    for i, sample in enumerate(iterator):
        imgs = sample["img"].cuda(non_blocking=True)
        msks = sample["msk"].cuda(non_blocking=True)
        msks_speed = sample["msk_speed"].cuda(non_blocking=True)
        lbls_speed = sample["lbl_speed"].cuda(non_blocking=True)
        msks_speed_cont = sample["msk_speed_cont"].cuda(non_blocking=True)
        
        out = model(imgs)

        loss1 = seg_loss(out[:, 0, ...], msks[:, 0, ...])
        loss2 = seg_loss(out[:, 1, ...], msks[:, 1, ...])

        loss3 = ce_loss(out[:, 3:, ...], lbls_speed)

        loss4 = mse_loss(out[:, 2:3, ...], msks_speed_cont)

        loss = 1.2 * loss1 + 0.05 * loss2 + 0.2 * loss3 + 0.1 * loss4

        # auxiliary segmentation loss over the ten speed-bin channels
        for _i in range(3, 13):
            loss += 0.03 * seg_loss(out[:, _i, ...], msks_speed[:, _i - 3, ...])

        with torch.no_grad():
            _probs = torch.sigmoid(out[:, 0, ...])
            dice_sc = 1 - dice_round(_probs, msks[:, 0, ...])

        losses.update(loss.item(), imgs.size(0))
        losses1.update(loss1.item(), imgs.size(0))
        losses2.update(loss2.item(), imgs.size(0))
        losses3.update(loss3.item(), imgs.size(0))
        losses4.update(loss4.item(), imgs.size(0))

        dices.update(dice_sc, imgs.size(0))

        iterator.set_description(
            "epoch: {}; lr {:.7f}; Loss {loss.val:.4f} ({loss.avg:.4f}); Loss1 {loss1.val:.4f} ({loss1.avg:.4f}); Loss2 {loss2.val:.4f} ({loss2.avg:.4f}); Loss3 {loss3.val:.4f} ({loss3.avg:.4f}); Loss4 {loss4.val:.4f} ({loss4.avg:.4f}); Dice {dice.val:.4f} ({dice.avg:.4f})".format(
                current_epoch, scheduler.get_lr()[-1], loss=losses, loss1=losses1, loss2=losses2, loss3=losses3, loss4=losses4, dice=dices))
        
        optimizer.zero_grad()
        loss.backward()
        # with amp.scale_loss(loss, optimizer) as scaled_loss:
        #     scaled_loss.backward()
        optimizer.step()

        # per-iteration stepping disabled; the scheduler steps once per epoch above
        # scheduler.step()

    print("epoch: {}; lr {:.7f}; Loss {loss.avg:.4f}; Loss1 {loss1.avg:.4f}; Loss2 {loss2.avg:.4f}; Loss3 {loss3.avg:.4f}; Loss4 {loss4.avg:.4f}; Dice {dice.avg:.4f}".format(
                current_epoch, scheduler.get_lr()[-1], loss=losses, loss1=losses1, loss2=losses2, loss3=losses3, loss4=losses4, dice=dices))
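# The seg_loss callables passed into these train_epoch variants are binary
# segmentation losses applied channel by channel. A minimal soft-dice sketch;
# the original competition repos typically combine dice with a focal/BCE
# term, so this shows only the dice part and is an assumption:
class SoftDiceLoss(torch.nn.Module):
    def __init__(self, eps=1e-6):
        super().__init__()
        self.eps = eps

    def forward(self, logits, targets):
        probs = torch.sigmoid(logits)
        p = probs.reshape(probs.size(0), -1)
        g = targets.reshape(targets.size(0), -1)
        intersection = (p * g).sum(dim=1)
        dice = (2.0 * intersection + self.eps) / (p.sum(dim=1) + g.sum(dim=1) + self.eps)
        return (1.0 - dice).mean()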
def train_epoch(current_epoch, loss_functions, model, optimizer, scheduler,
                train_data_loader, summary_writer, conf, local_rank):
    losses = AverageMeter()
    dices = AverageMeter()
    iterator = tqdm(train_data_loader)
    model.train()
    if conf["optimizer"]["schedule"]["mode"] == "epoch":
        scheduler.step(current_epoch)
    for i, sample in enumerate(iterator):
        imgs = sample["image"].cuda()[:, :3, :, :]
        masks = sample["mask"].cuda().float()
        out_mask = model(imgs)
        mask_band = 4
        with torch.no_grad():
            pred = torch.sigmoid(out_mask)
            d = dice_round(pred, masks[:, mask_band:, ...], t=0.5).item()
        dices.update(d, imgs.size(0))

        mask_loss = loss_functions["mask_loss"](out_mask,
                                                masks[:, mask_band:,
                                                      ...].contiguous())
        loss = mask_loss
        losses.update(loss.item(), imgs.size(0))
        iterator.set_description(
            "epoch: {}; lr {:.7f}; Loss ({loss.avg:.4f}); dice ({dice.avg:.4f}); "
            .format(current_epoch,
                    scheduler.get_lr()[-1],
                    loss=losses,
                    dice=dices))
        optimizer.zero_grad()
        if conf["fp16"]:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            # clip gradients on the fp32 master params that apex maintains
            torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), 1)
        else:
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1)
        optimizer.step()
        torch.cuda.synchronize()

        if conf["optimizer"]["schedule"]["mode"] in ("step", "poly"):
            scheduler.step(i + current_epoch * len(train_data_loader))

    if local_rank == 0:
        for idx, param_group in enumerate(optimizer.param_groups):
            lr = param_group['lr']
            summary_writer.add_scalar('group{}/lr'.format(idx),
                                      float(lr),
                                      global_step=current_epoch)
        summary_writer.add_scalar('train/loss',
                                  float(losses.avg),
                                  global_step=current_epoch)
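# The conf mapping read by the loop above, sketched from the keys it
# accesses; the values are illustrative, not from the original config files:
conf_example = {
    "fp16": True,
    "optimizer": {"schedule": {"mode": "poly"}},  # "epoch", "step" or "poly"
}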
def train_epoch(current_epoch, loss_function, l1_loss, model, optimizer, scheduler, train_data_loader):
    losses = AverageMeter()
    losses2 = AverageMeter()
    losses3 = AverageMeter()
    losses4 = AverageMeter()
    nadir_losses = AverageMeter()
    dices = AverageMeter()
    iterator = tqdm(train_data_loader)
    model.train()
    scheduler.step(current_epoch)
    for i, sample in enumerate(iterator):
        imgs = sample["img"].cuda(non_blocking=True)
        cat_inp = sample["cat_inp"].cuda(non_blocking=True)
        coord_inp = sample["coord_inp"].cuda(non_blocking=True)
        masks = sample["mask"].cuda(non_blocking=True)
        nadir = sample["nadir"].cuda(non_blocking=True)
        out, nadir_pred = model(imgs, nadir, cat_inp, coord_inp)

        loss1 = loss_function(out[:, 0, ...], masks[:, 0, ...])
        loss2 = loss_function(out[:, 1, ...], masks[:, 1, ...])
        loss3 = loss_function(out[:, 2, ...], masks[:, 2, ...])
        loss4 = loss_function(out[:, 3, ...], masks[:, 3, ...])
        nadir_loss = l1_loss(nadir_pred, nadir)
        loss = loss1 + 0.4 * loss2 + 0.05 * loss3 + 0.005 * loss4 + 0.002 * nadir_loss

        with torch.no_grad():
            _probs = torch.sigmoid(out[:, 0, ...])
            dice_sc = 1 - dice_round(_probs, masks[:, 0, ...])

        losses.update(loss1.item(), imgs.size(0))
        losses2.update(loss2.item(), imgs.size(0))
        losses3.update(loss3.item(), imgs.size(0))
        losses4.update(loss4.item(), imgs.size(0))
        nadir_losses.update(nadir_loss.item(), imgs.size(0))
        dices.update(dice_sc, imgs.size(0))
        iterator.set_description(
            "epoch: {}; lr {:.7f}; Loss {loss.val:.4f} ({loss.avg:.4f}); Loss2 {loss2.val:.4f} ({loss2.avg:.4f}); Loss3 {loss3.val:.4f} ({loss3.avg:.4f}); Loss4 {loss4.val:.4f} ({loss4.avg:.4f}); Dice {dice.val:.4f} ({dice.avg:.4f}); Nadir {nadir_loss.val:.4f} ({nadir_loss.avg:.4f})".format(
                current_epoch, scheduler.get_lr()[-1], loss=losses, loss2=losses2, loss3=losses3, loss4=losses4, dice=dices, nadir_loss=nadir_losses))
        
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    print("epoch: {}; lr {:.7f}; Loss {loss.avg:.4f}; Loss2 {loss2.avg:.4f}; Loss3 {loss3.avg:.4f}; Loss4 {loss4.avg:.4f}; Dice {dice.avg:.4f}; Nadir {nadir_loss.avg:.4f}".format(
                current_epoch, scheduler.get_lr()[-1], loss=losses, loss2=losses2, loss3=losses3, loss4=losses4, dice=dices, nadir_loss=nadir_losses))
Example #7
def train_epoch(current_epoch, seg_loss, model, optimizer, scheduler,
                train_data_loader):
    losses = AverageMeter()

    dices = AverageMeter()

    iterator = tqdm(train_data_loader)
    model.train()
    for i, sample in enumerate(iterator):
        imgs = sample["img"].cuda(non_blocking=True)
        msks = sample["msk"].cuda(non_blocking=True)

        out = model(imgs)

        loss = seg_loss(out, msks)

        with torch.no_grad():
            _probs = torch.sigmoid(out[:, 0, ...])
            dice_sc = 1 - dice_round(_probs, msks[:, 0, ...])

        losses.update(loss.item(), imgs.size(0))

        dices.update(dice_sc, imgs.size(0))

        iterator.set_description(
            "epoch: {}; lr {:.7f}; Loss {loss.val:.4f} ({loss.avg:.4f}); Dice {dice.val:.4f} ({dice.avg:.4f})"
            .format(current_epoch,
                    scheduler.get_lr()[-1],
                    loss=losses,
                    dice=dices))

        optimizer.zero_grad()
        # loss.backward()
        with amp.scale_loss(loss, optimizer) as scaled_loss:
            scaled_loss.backward()
        torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), 1.1)
        optimizer.step()

    scheduler.step(current_epoch)

    print("epoch: {}; lr {:.7f}; Loss {loss.avg:.4f}; Dice {dice.avg:.4f}".
          format(current_epoch,
                 scheduler.get_lr()[-1],
                 loss=losses,
                 dice=dices))
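# The apex-based loops above (amp.scale_loss / amp.master_params) assume the
# model and optimizer were wrapped once at startup with the legacy NVIDIA
# apex API; the opt_level here is an illustrative choice:
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")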
def validate(net, data_loader, predictions_dir):
    os.makedirs(predictions_dir, exist_ok=True)
    preds_dir = os.path.join(predictions_dir, "predictions")
    os.makedirs(preds_dir, exist_ok=True)
    dices = []
    with torch.no_grad():
        for sample in tqdm(data_loader):
            imgs = sample["image"].cuda().float()[:, :3, :, :]
            mask = sample["mask"].cuda().float()

            output = net(imgs)
            binary_pred = torch.sigmoid(output)

            for i in range(output.shape[0]):
                # score each sample individually
                d = dice_round(binary_pred[i:i + 1], mask[i:i + 1, 4:, ...],
                               t=0.5).item()
                dices.append(d)
                cv2.imwrite(
                    os.path.join(
                        preds_dir, "test_localization_" +
                        sample["img_name"][i] + "_prediction.png"),
                    (binary_pred[i, 0].cpu().numpy() > 0.5).astype(np.uint8))
    return np.mean(dices)
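# Illustrative call for the validate variant above; `net` and `val_loader`
# are assumed to exist in the surrounding script:
mean_dice = validate(net, val_loader, predictions_dir="preds/fold0")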
def train_epoch(current_epoch, combo_loss, model, optimizer, scaler, train_data_loader):
    losses = AverageMeter()
    losses2 = AverageMeter()
    losses3 = AverageMeter()
    dices = AverageMeter()
    dices2 = AverageMeter()
    dices3 = AverageMeter()
    
    if args.local_rank == 0:
        iterator = tqdm(train_data_loader)
    else:
        iterator = train_data_loader
    model.train()

    _lr = optimizer.param_groups[0]['lr']

    for i, sample in enumerate(iterator):
        with torch.cuda.amp.autocast():
            imgs = sample["img"].cuda(non_blocking=True)
            otps = sample["msk"].cuda(non_blocking=True)

            res = model(imgs)

            loss1 = combo_loss(res[:, 0, ...], otps[:, 0, ...])
            loss2 = combo_loss(res[:, 1, ...], otps[:, 1, ...])
            loss3 = combo_loss(res[:, 2, ...], otps[:, 2, ...])
            
            loss = loss2 + loss1 * 0.25 + 0.25 * loss3

            with torch.no_grad():
                _probs = torch.sigmoid(res[:, 0, ...])
                dice_sc = 1 - dice_round(_probs, otps[:, 0, ...])
                _probs = torch.sigmoid(res[:, 1, ...])
                dice_sc2 = 1 - dice_round(_probs, otps[:, 1, ...])
                _probs = torch.sigmoid(res[:, 2, ...])
                dice_sc3 = 1 - dice_round(_probs, otps[:, 2, ...])

        if i % 5 == 0:
            if args.distributed:
                reduced_loss1 = reduce_tensor(loss1.data)
                reduced_loss2 = reduce_tensor(loss2.data)
                reduced_loss3 = reduce_tensor(loss3.data)
                reduced_dice = reduce_tensor(dice_sc)
                reduced_dice2 = reduce_tensor(dice_sc2)
                reduced_dice3 = reduce_tensor(dice_sc3)
            else:
                reduced_loss1 = loss1.data
                reduced_loss2 = loss2.data
                reduced_loss3 = loss3.data
                reduced_dice = dice_sc
                reduced_dice2 = dice_sc2
                reduced_dice3 = dice_sc3

            # to_python_float incurs a host<->device sync
            losses.update(to_python_float(reduced_loss1), imgs.size(0))
            losses2.update(to_python_float(reduced_loss2), imgs.size(0))
            losses3.update(to_python_float(reduced_loss3), imgs.size(0))
            dices.update(reduced_dice, imgs.size(0)) 
            dices2.update(reduced_dice2, imgs.size(0)) 
            dices3.update(reduced_dice3, imgs.size(0)) 

        if args.local_rank == 0:
            iterator.set_description(
                "epoch: {}; lr {:.7f}; Loss {loss.val:.4f} ({loss.avg:.4f}); Loss2 {loss2.val:.4f} ({loss2.avg:.4f}); Loss3 {loss3.val:.4f} ({loss3.avg:.4f}); dice {dices.val:.4f} ({dices.avg:.4f}); dice2 {dices2.val:.4f} ({dices2.avg:.4f}); dice3 {dices3.val:.4f} ({dices3.avg:.4f});".format(
                    current_epoch, _lr, loss=losses, loss2=losses2, loss3=losses3, dices=dices, dices2=dices2, dices3=dices3))


        optimizer.zero_grad()

        scaler.scale(loss).backward()
        scaler.unscale_(optimizer)
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.5)
        scaler.step(optimizer)
        scaler.update()

        torch.cuda.synchronize()


    if args.local_rank == 0:
        print("epoch: {}; lr {:.7f}; Loss {loss.avg:.4f}; Loss2 {loss2.avg:.4f}; Loss3 {loss3.avg:.4f}; Dice {dices.avg:.4f}; Dice2 {dices2.avg:.4f}; Dice3 {dices3.avg:.4f}".format(
                    current_epoch, _lr, loss=losses, loss2=losses2, loss3=losses3, dices=dices, dices2=dices2, dices3=dices3))
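# Plausible definitions of the distributed helpers the loop above assumes,
# patterned on the NVIDIA apex ImageNet example; `args.world_size` and
# `args.distributed` come from the surrounding script and are assumptions
# here:
import torch.distributed as dist

def reduce_tensor(tensor):
    # average a tensor across all ranks
    rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= args.world_size
    return rt

def to_python_float(t):
    # incurs a host<->device sync when t lives on the GPU
    return t.item() if hasattr(t, "item") else float(t)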