Example #1
def func_eval(model, criterion, val_dataset, val_loader, post_crf=False):
    print("Start validation.\n")
    model.eval()  # put the model in evaluation mode

    with torch.no_grad():
        n_class = 12
        total_loss_sum = 0
        hist = np.zeros((n_class, n_class))  # confusion matrix

        for step, (images, masks, img_info) in enumerate(val_loader):
            images = torch.stack(images).to(
                CFG.device)  # (batch, channel, height, width)
            masks = torch.stack(masks).long().to(
                CFG.device)  # (batch, channel, height, width)

            # forward pass (get logits)
            logits = model(images)

            # compute the loss (cross entropy)
            loss = criterion(logits, masks)
            total_loss_sum += loss.item() * images.shape[0]

            # use softmax to get probability
            probs = F.softmax(logits, dim=1)
            probs = probs.data.cpu().numpy()

            # Postprocessing
            if post_crf:
                pool = mp.Pool(mp.cpu_count())
                images = images.data.cpu().numpy().astype(np.uint8).transpose(
                    0, 2, 3, 1)
                probs = pool.map(dense_crf_wrapper, zip(images, probs))
                pool.close()
                pool.join()

            # take the class index with the highest probability
            preds = np.argmax(probs, axis=1)
            masks = masks.detach().cpu().numpy()

            hist = add_hist(hist, masks, preds, n_class=n_class)

            if step == 0:
                fig_mask = log_images(masks, preds, img_info)

            del images, masks, logits, probs, preds

        val_loss = total_loss_sum / len(val_dataset)

    acc, acc_cls, mIoU, iu, fwavacc = label_accuracy_score(hist)
    recycle = [
        'Background', 'UNKNOWN', 'General trash', 'Paper', 'Paper pack',
        'Metal', 'Glass', 'Plastic', 'Styrofoam', 'Plastic bag', 'Battery',
        'Clothing'
    ]
    mIoU_df = pd.DataFrame({'Recycle Type': recycle, 'IoU': iu})
    return val_loss, acc, mIoU, mIoU_df, fig_mask
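
# NOTE: `dense_crf_wrapper` (used in the post_crf branch above) is not defined in
# this example. The sketch below is a guess at its shape, assuming the pydensecrf
# package and softmax unaries; the pairwise kernel parameters are illustrative,
# not taken from the original code.
import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax


def dense_crf(img, probs, n_iters=5):
    # img:   (height, width, 3) uint8 image
    # probs: (n_class, height, width) softmax probabilities
    c, h, w = probs.shape
    d = dcrf.DenseCRF2D(w, h, c)
    d.setUnaryEnergy(np.ascontiguousarray(unary_from_softmax(probs)))
    # pairwise terms: spatial smoothness + colour/appearance consistency
    d.addPairwiseGaussian(sxy=3, compat=3)
    d.addPairwiseBilateral(sxy=50, srgb=13,
                           rgbim=np.ascontiguousarray(img), compat=10)
    q = d.inference(n_iters)
    return np.array(q).reshape((c, h, w))


def dense_crf_wrapper(args):
    # pool.map passes one (image, probs) pair per call
    return dense_crf(args[0], args[1])
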
Example #2
def train(num_epochs, model, data_loader, val_loader, val_every, device, file_name):
    learning_rate = 0.0001
    from torch.optim.swa_utils import AveragedModel, SWALR
    from segmentation_models_pytorch.losses import SoftCrossEntropyLoss, JaccardLoss
    from adamp import AdamP

    criterion = [SoftCrossEntropyLoss(smooth_factor=0.1), JaccardLoss('multiclass', classes=12)]
    optimizer = AdamP(params=model.parameters(), lr=learning_rate, weight_decay=1e-6)
    swa_scheduler = SWALR(optimizer, swa_lr=learning_rate)
    swa_model = AveragedModel(model)
    look = Lookahead(optimizer, la_alpha=0.5)

    print('Start training..')
    best_miou = 0
    for epoch in range(num_epochs):
        hist = np.zeros((12, 12))
        model.train()
        for step, (images, masks, _) in enumerate(data_loader):
            loss = 0
            images = torch.stack(images)  # (batch, channel, height, width)
            masks = torch.stack(masks).long()  # (batch, channel, height, width)

            # move tensors to the device for GPU computation
            images, masks = images.to(device), masks.to(device)

            # inference
            outputs = model(images)
            # compute the loss (soft cross entropy + Jaccard)
            for each in criterion:
                loss += each(outputs, masks)

            look.zero_grad()
            loss.backward()
            look.step()

            outputs = torch.argmax(outputs.squeeze(), dim=1).detach().cpu().numpy()
            hist = add_hist(hist, masks.detach().cpu().numpy(), outputs, n_class=12)
            acc, acc_cls, mIoU, fwavacc = label_accuracy_score(hist)
            # print loss and mIoU every 25 steps
            if (step + 1) % 25 == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, mIoU: {:.4f}'.format(
                    epoch + 1, num_epochs, step + 1, len(data_loader), loss.item(), mIoU))

        # run validation every val_every epochs and save the best model
        if (epoch + 1) % val_every == 0:
            avrg_loss, val_miou = validation(epoch + 1, model, val_loader, criterion, device)
            if val_miou > best_miou:
                print('Best performance at epoch: {}'.format(epoch + 1))
                print('Save model in', saved_dir)
                best_miou = val_miou
                save_model(model, file_name = file_name)

        if epoch > 3:
            swa_model.update_parameters(model)
            swa_scheduler.step()
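
# NOTE: the `Lookahead` wrapper used above (and again in Example #6) is not part of
# the snippet. This is a minimal stand-in following Zhang et al. (2019); only the
# zero_grad()/step() interface the training loop relies on is implemented, and the
# la_steps/la_alpha defaults are assumptions.
import torch


class Lookahead:
    def __init__(self, optimizer, la_steps=5, la_alpha=0.5):
        self.optimizer = optimizer
        self.la_steps = la_steps   # sync the slow weights every la_steps inner steps
        self.la_alpha = la_alpha   # interpolation factor toward the fast weights
        self._counter = 0
        # keep a "slow" copy of every parameter
        self._slow = [[p.detach().clone() for p in group["params"]]
                      for group in optimizer.param_groups]

    def zero_grad(self):
        self.optimizer.zero_grad()

    def step(self):
        self.optimizer.step()
        self._counter += 1
        if self._counter % self.la_steps == 0:
            with torch.no_grad():
                for group, slow_group in zip(self.optimizer.param_groups, self._slow):
                    for p, slow in zip(group["params"], slow_group):
                        slow.add_(p.data - slow, alpha=self.la_alpha)
                        p.data.copy_(slow)
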
Example #3
def train(num_epochs, model, data_loader, val_loader, criterion, optimizer,
          saved_dir, val_every, device, file_name, n_class):
    print('Start training..')
    best_mIoU = 0
    for epoch in range(num_epochs):
        hist = np.zeros((n_class, n_class))
        model.train()
        for step, (images, masks, _) in enumerate(data_loader):
            # (batch, channel, height, width)
            images = torch.stack(images)
            # (batch, channel, height, width)
            masks = torch.stack(masks).long()

            # move tensors to the device for GPU computation
            images, masks = images.to(device), masks.to(device)

            # inference
            outputs = model(images)

            # compute the loss (cross entropy)
            loss = criterion(outputs, masks)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            outputs = torch.argmax(outputs.squeeze(),
                                   dim=1).detach().cpu().numpy()
            hist = add_hist(hist,
                            masks.detach().cpu().numpy(),
                            outputs,
                            n_class=n_class)
            acc, acc_cls, mIoU, fwavacc = label_accuracy_score(hist)
            wandb.log({"loss": loss.item(), "mIoU": mIoU})  # log to wandb
            # print loss and mIoU every 25 steps
            if (step + 1) % 25 == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, mIoU:{:.4f}'.
                      format(epoch + 1, num_epochs, step + 1,
                             len(data_loader), loss.item(), mIoU))

        # run validation every val_every epochs and print the loss
        # save the model when mIoU improves
        if (epoch + 1) % val_every == 0:
            avrg_loss, val_mIoU = validation(epoch + 1, model, val_loader,
                                             criterion, device, n_class)
            if val_mIoU > best_mIoU:
                print('Best performance at epoch: {}'.format(epoch + 1))
                print('Save model in', saved_dir)
                best_mIoU = val_mIoU
                save_model(model, saved_dir, file_name)
            wandb.log({
                "val_loss": avrg_loss,
                "val_mIoU": val_mIoU,
                "best_mIoU": best_mIoU
            })
def validation(epoch, model, data_loader, criterion, device, n_class):
    print('Start validation #{}'.format(epoch))
    model.eval()
    with torch.no_grad():
        total_loss = 0
        cnt = 0
        hist = np.zeros((n_class, n_class))  # confusion matrix accumulated over batches
        for step, (images, masks, _) in enumerate(data_loader):

            # (batch, channel, height, width)
            images = torch.stack(images)
            # (batch, channel, height, width)
            masks = torch.stack(masks).long()

            images, masks = images.to(device), masks.to(device)

            outputs = model(images)
            loss = criterion(outputs, masks)
            total_loss += loss.item()
            cnt += 1

            outputs = torch.argmax(outputs.squeeze(),
                                   dim=1).detach().cpu().numpy()

            # accumulate into the confusion matrix
            hist = add_hist(hist,
                            masks.detach().cpu().numpy(),
                            outputs,
                            n_class=n_class)

            # mIoU = label_accuracy_score(
            #     masks.detach().cpu().numpy(), outputs, n_class=12)[2]
            # mIoU_list.append(mIoU)

        # mIoU is computed over the whole validation set
        acc, acc_cls, mIoU, fwavacc = label_accuracy_score(hist)
        avrg_loss = total_loss / cnt
        print('Validation #{}  Average Loss: {:.4f}, mIoU: {:.4f}'.format(
            epoch, avrg_loss, mIoU))
    return avrg_loss, mIoU
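
# NOTE: `add_hist` and `label_accuracy_score` are used throughout these examples but
# never defined. Below is a sketch of the usual confusion-matrix helpers (as in the
# torchfcn utilities); treat it as an assumption about the original implementation.
import numpy as np


def _fast_hist(label_true, label_pred, n_class):
    # confusion matrix for one image, ignoring out-of-range labels
    mask = (label_true >= 0) & (label_true < n_class)
    return np.bincount(
        n_class * label_true[mask].astype(int) + label_pred[mask],
        minlength=n_class ** 2,
    ).reshape(n_class, n_class)


def add_hist(hist, label_trues, label_preds, n_class):
    # accumulate per-image confusion matrices into hist
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    return hist


def label_accuracy_score(hist):
    # overall acc, mean class acc, mIoU and frequency-weighted IoU from hist
    # (Example #1 additionally unpacks the per-class IoU vector `iu`, so its
    # helper presumably returns it as well)
    acc = np.diag(hist).sum() / hist.sum()
    with np.errstate(divide="ignore", invalid="ignore"):
        acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))
        iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    mean_iu = np.nanmean(iu)
    freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, fwavacc
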
def pseudo_labeling(num_epochs, model, data_loader, val_loader,
                    unlabeled_loader, criterion, optimizer, device, n_class,
                    saved_dir, file_name, val_every):
    # Instead of using current epoch we use a "step" variable to calculate alpha_weight
    # This helps the model converge faster
    step = 100
    size = 256
    transform = A.Compose([A.Resize(256, 256)])
    preds_array = np.empty((0, size * size), dtype=np.int64)
    file_name_list = []
    best_mIoU = 0
    model.train()
    for epoch in range(num_epochs):
        hist = np.zeros((n_class, n_class))
        for batch_idx, (imgs, image_infos) in enumerate(unlabeled_loader):

            # Forward Pass to get the pseudo labels
            # --------------------------------------------- pass the unlabeled (test) images through the model
            model.eval()
            outs = model(torch.stack(imgs).to(device))
            oms = torch.argmax(outs.squeeze(), dim=1).detach().cpu().numpy()
            oms = torch.Tensor(oms)
            oms = oms.long()
            oms = oms.to(device)
            # --------------------------------------------- training step

            model.train()
            # Now calculate the unlabeled loss using the pseudo label
            imgs = torch.stack(imgs)
            imgs = imgs.to(device)
            # preds_array = preds_array.to(device)

            output = model(imgs)

            unlabeled_loss = alpha_weight(step) * criterion(output, oms)

            # Backpropagate
            optimizer.zero_grad()
            unlabeled_loss.backward()
            optimizer.step()
            output = torch.argmax(output.squeeze(),
                                  dim=1).detach().cpu().numpy()
            hist = add_hist(hist,
                            oms.detach().cpu().numpy(),
                            output,
                            n_class=n_class)

            if (batch_idx + 1) % 25 == 0:
                acc, acc_cls, mIoU, fwavacc = label_accuracy_score(hist)
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, mIoU:{:.4f}'.
                      format(epoch + 1, num_epochs, batch_idx + 1,
                             len(unlabeled_loader), unlabeled_loss.item(),
                             mIoU))
            # every 50 batches, train one full pass over the labeled data
            if batch_idx % 50 == 0:

                # normal supervised training procedure
                # (use the labeled data_loader and avoid shadowing batch_idx)
                for images, masks, _ in data_loader:
                    images = torch.stack(images)
                    # (batch, channel, height, width)
                    masks = torch.stack(masks).long()

                    # move tensors to the device for GPU computation
                    images, masks = images.to(device), masks.to(device)

                    output = model(images)
                    labeled_loss = criterion(output, masks)

                    optimizer.zero_grad()
                    labeled_loss.backward()
                    optimizer.step()

                # Now we increment step by 1
                step += 1

        if (epoch + 1) % val_every == 0:
            avrg_loss, val_mIoU = validation(epoch + 1, model, val_loader,
                                             criterion, device, n_class)
            if val_mIoU > best_mIoU:
                print('Best performance at epoch: {}'.format(epoch + 1))
                print('Save model in', saved_dir)
                best_mIoU = val_mIoU
                save_model(model, saved_dir, file_name)
            wandb.log({
                "val_loss": avrg_loss,
                "val_mIoU": val_mIoU,
                "best_mIoU": best_mIoU
            })

        model.train()
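
# NOTE: `alpha_weight(step)` (the weight on the pseudo-label loss) is not defined in
# these snippets. The usual choice is a piecewise-linear ramp-up as in the
# pseudo-labeling literature; T1, T2 and af below are assumed values, not taken
# from the original code.
T1, T2, af = 100, 700, 3.0


def alpha_weight(step):
    # ramp the unlabeled-loss weight linearly from 0 to af between T1 and T2
    if step < T1:
        return 0.0
    if step > T2:
        return af
    return (step - T1) / (T2 - T1) * af
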
Example #6
def pseudo_labeling(num_epochs, model, data_loader, val_loader,
                    unlabeled_loader, device, val_every, file_name):
    # Instead of using current epoch we use a "step" variable to calculate alpha_weight
    # This helps the model converge faster
    from torch.optim.swa_utils import AveragedModel, SWALR
    from segmentation_models_pytorch.losses import SoftCrossEntropyLoss, JaccardLoss
    from adamp import AdamP

    criterion = [
        SoftCrossEntropyLoss(smooth_factor=0.1),
        JaccardLoss('multiclass', classes=12)
    ]
    optimizer = AdamP(params=model.parameters(), lr=0.0001, weight_decay=1e-6)
    swa_scheduler = SWALR(optimizer, swa_lr=0.0001)
    swa_model = AveragedModel(model)
    optimizer = Lookahead(optimizer, la_alpha=0.5)

    step = 100
    size = 256
    best_mIoU = 0
    model.train()
    print('Start Pseudo-Labeling..')
    for epoch in range(num_epochs):
        hist = np.zeros((12, 12))
        for batch_idx, (imgs, image_infos) in enumerate(unlabeled_loader):

            # Forward Pass to get the pseudo labels
            # --------------------------------------------- pass the unlabeled (test) images through the model
            model.eval()
            outs = model(torch.stack(imgs).to(device))
            oms = torch.argmax(outs.squeeze(), dim=1).detach().cpu().numpy()
            oms = torch.Tensor(oms)
            oms = oms.long()
            oms = oms.to(device)

            # --------------------------------------------- training step

            model.train()
            # Now calculate the unlabeled loss using the pseudo label
            imgs = torch.stack(imgs)
            imgs = imgs.to(device)
            # preds_array = preds_array.to(device)

            output = model(imgs)
            loss = 0
            for each in criterion:
                loss += each(output, oms)

            unlabeled_loss = alpha_weight(step) * loss

            # Backpropagate
            optimizer.zero_grad()
            unlabeled_loss.backward()
            optimizer.step()
            output = torch.argmax(output.squeeze(),
                                  dim=1).detach().cpu().numpy()
            hist = add_hist(hist,
                            oms.detach().cpu().numpy(),
                            output,
                            n_class=12)

            if (batch_idx + 1) % 25 == 0:
                acc, acc_cls, mIoU, fwavacc = label_accuracy_score(hist)
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, mIoU:{:.4f}'.
                      format(epoch + 1, num_epochs, batch_idx + 1,
                             len(unlabeled_loader), unlabeled_loss.item(),
                             mIoU))
            # every 50 batches, train one full pass over the labeled data
            if batch_idx % 50 == 0:

                # normal supervised training procedure
                # (avoid reusing batch_idx, which would shadow the outer loop variable)
                for images, masks, _ in data_loader:
                    labeled_loss = 0
                    images = torch.stack(images)
                    # (batch, channel, height, width)
                    masks = torch.stack(masks).long()

                    # move tensors to the device for GPU computation
                    images, masks = images.to(device), masks.to(device)

                    output = model(images)

                    for each in criterion:
                        labeled_loss += each(output, masks)

                    optimizer.zero_grad()
                    labeled_loss.backward()
                    optimizer.step()

                # Now we increment step by 1
                step += 1

        if (epoch + 1) % val_every == 0:
            avrg_loss, val_mIoU = validation(epoch + 1, model, val_loader,
                                             criterion, device)
            if val_mIoU > best_mIoU:
                print('Best performance at epoch: {}'.format(epoch + 1))
                print('Save model in', saved_dir)
                best_mIoU = val_mIoU
                save_model(model, file_name=file_name)

        model.train()

        if epoch > 3:
            swa_model.update_parameters(model)
            swa_scheduler.step()
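
# NOTE: both here and in Example #2 the swa_model is updated but never finalized or
# saved. A typical wrap-up after the epoch loop is sketched below; it re-estimates
# the BatchNorm statistics manually (rather than via torch.optim.swa_utils.update_bn,
# because the loaders in these examples yield lists of tensors), and the 'swa_' file
# name is an assumption.
import torch


def finalize_swa(swa_model, loader, device):
    momenta = {}
    for m in swa_model.modules():
        if isinstance(m, torch.nn.modules.batchnorm._BatchNorm):
            m.reset_running_stats()
            momenta[m] = m.momentum
            m.momentum = None  # None -> cumulative (true) average over the passes below
    swa_model.train()
    with torch.no_grad():
        for images, _, _ in loader:
            swa_model(torch.stack(images).to(device))
    for m, momentum in momenta.items():
        m.momentum = momentum


# usage sketch, inside the training function after the epoch loop:
# finalize_swa(swa_model, data_loader, device)
# save_model(swa_model.module, file_name='swa_' + file_name)  # unwrap AveragedModel
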
Example #7
def evaluate(args, model, criterions, dataloader):
    model.eval()
    epoch_loss = 0
    n_class = 12
    example_images = []
    with torch.no_grad():
        hist = np.zeros((n_class, n_class))
        miou_images = []
        for images, masks, _ in dataloader:

            images = torch.stack(images)  # (batch, channel, height, width)
            masks = torch.stack(
                masks).long()  # (batch, channel, height, width)

            images, masks = images.to(args.device), masks.to(args.device)

            outputs = model(images)
            flag = criterions[0]
            if flag == "+":
                loss = criterions[1](outputs, masks) + criterions[2](outputs,
                                                                     masks)
            elif flag == "-":
                loss = criterions[1](outputs, masks) - criterions[2](outputs,
                                                                     masks)
            else:
                loss = criterions[1](outputs, masks)
            epoch_loss += loss.item()

            inputs_np = torch.clone(images).detach().cpu().permute(0, 2, 3,
                                                                   1).numpy()
            inputs_np = denormalize_image(inputs_np,
                                          mean=(0.4611, 0.4403, 0.4193),
                                          std=(0.2107, 0.2074, 0.2157))

            example_images.append(
                wb_mask(
                    inputs_np[0],
                    pred_mask=outputs.argmax(1)[0].detach().cpu().numpy(),
                    true_mask=masks[0].detach().cpu().numpy(),
                ))

            outputs = torch.argmax(outputs.squeeze(),
                                   dim=1).detach().cpu().numpy()

            hist = add_hist(hist,
                            masks.detach().cpu().numpy(),
                            outputs,
                            n_class=n_class)

            # record the per-image mIoU
            miou_list = get_miou(masks.detach().cpu().numpy(),
                                 outputs,
                                 n_class=n_class)
            miou_images.extend(miou_list)

        # metrics
        acc, acc_cls, miou, fwavacc = label_accuracy_score(hist)

        # leaderboard-style mIoU (mean of the per-image values)
        lb_miou = np.nanmean(miou_images)

        print(f"acc:{acc:.4f}, acc_cls:{acc_cls:.4f}, fwavacc:{fwavacc:.4f}")

        # log the normalized confusion matrix to wandb as a heatmap
        summa = hist.sum(1).reshape(-1, 1)
        percent = hist / summa
        plt.figure(figsize=(10, 10))
        sns.heatmap(percent, annot=True, fmt=".2%", annot_kws={"size": 8})
        wandb.log({"percent_hist": wandb.Image(plt)}, commit=False)

    return (epoch_loss / len(dataloader)), lb_miou, miou, example_images
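
# NOTE: `get_miou` (the per-image mIoU used for the leaderboard estimate above) is
# not shown either. The sketch below follows the same confusion-matrix convention
# as add_hist and is an assumption about the original implementation.
import numpy as np


def get_miou(label_trues, label_preds, n_class):
    # return one mIoU value per image in the batch
    miou_list = []
    for lt, lp in zip(label_trues, label_preds):
        lt, lp = lt.flatten(), lp.flatten()
        valid = (lt >= 0) & (lt < n_class)
        hist = np.bincount(n_class * lt[valid].astype(int) + lp[valid],
                           minlength=n_class ** 2).reshape(n_class, n_class)
        with np.errstate(divide="ignore", invalid="ignore"):
            iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
        miou_list.append(np.nanmean(iu))
    return miou_list
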