Example no. 1
def test(net, epoch_num):

    test_input_images_list = glob.glob(opt.test_input_images_dir + '*.jpg')
    num_samples = int(opt.dataset_frac / 2 * len(test_input_images_list))
    test_input_images_list = test_input_images_list[:num_samples]

    print("number of files in test dataset: {}".format(
        len(test_input_images_list)))

    duts_test_dataset = DUTS_SOD_dataset(
        input_image_name_list=test_input_images_list, scale=opt.scale)
    test_dataloader = DataLoader(duts_test_dataset,
                                 batch_size=opt.batch_size_test,
                                 shuffle=False)

    net.eval()
    loss = np.zeros(len(test_dataloader))
    MAE_test_set_in_epoch = []
    F_beta_test_set_in_epoch = []
    relaxed_F_beta_test_set_in_epoch = []

    t = tqdm(iter(test_dataloader), leave=True, total=len(test_dataloader))

    for idx, data in enumerate(t):

        guidance, gt = data['guidance'].to(device), data['gt'].to(device)
        out, out_sig, out_lr_up, out_lr_up_sig, _ = net(guidance)

        # weighted sum of the HR and upsampled-LR hybrid losses for this batch
        batch_loss = opt.w_hr * h_loss.hybrid_loss_saliency(out_sig, gt, opt.w_bce, opt.w_ssim, opt.w_iou) + \
                     opt.w_lr * h_loss.hybrid_loss_saliency(out_lr_up_sig, gt, opt.w_bce, opt.w_ssim, opt.w_iou)
        loss[idx] = batch_loss.item()

        t.refresh()

        if idx in (0, 1, 2, 5, 9, 10):
            if opt.save_sample_test:
                # save the HR output, GT, upsampled-LR output and the guide image for a few fixed batches
                cv2.imwrite(
                    f"{result_root}/epoch_{epoch_num + 1}_test_{idx}_output_hr_sigmoid.png",
                    sod_utils.normPRED(out_sig[0, 0].detach()).cpu().numpy().astype('float32') * 255)
                cv2.imwrite(
                    f"{result_root}/test_{idx}_gt.png",
                    gt[0, 0].detach().cpu().numpy().astype('float32') * 255)
                cv2.imwrite(
                    f"{result_root}/epoch_{epoch_num + 1}_test_{idx}_output_lr_sigmoid.png",
                    sod_utils.normPRED(out_lr_up_sig[0, 0].detach()).cpu().numpy().astype('float32') * 255)
                cv2.imwrite(
                    f"{result_root}/test_{idx}_guide_rgb.png",
                    guidance[0, 0].detach().cpu().numpy().astype('float32') * 255)

        for i in range(gt.size(0)):  # iterate over the batch dimension
            MAE, F_beta, relaxed_F_beta = sod_utils.calc_eval_measures(
                gt[i, 0, :, :].detach().cpu().numpy(),
                out_sig[i, 0, :, :].detach().cpu().numpy())
            MAE_test_set_in_epoch.append(MAE)
            F_beta_test_set_in_epoch.append(F_beta)
            relaxed_F_beta_test_set_in_epoch.append(relaxed_F_beta)

        t.set_description('[validate epoch:%d] loss: %.8f ... ' \
                          'avg MAE: %.8f ...  ' \
                          'max F_beta: %.8f ... ' \
                          'avg relax F_beta: %.8f' % \
                          (epoch_num + 1, loss[:idx+1].mean(), sum(MAE_test_set_in_epoch) / len(MAE_test_set_in_epoch),
                           max(F_beta_test_set_in_epoch),
                           sum(relaxed_F_beta_test_set_in_epoch) / len(relaxed_F_beta_test_set_in_epoch)))

    max_MAE_test_set_per_epoch = max(MAE_test_set_in_epoch)
    avg_MAE_test_set_per_epoch = sum(MAE_test_set_in_epoch) / len(
        MAE_test_set_in_epoch)
    max_F_beta_test_set_per_epoch = max(F_beta_test_set_in_epoch)
    avg_F_beta_test_set_per_epoch = sum(F_beta_test_set_in_epoch) / len(
        F_beta_test_set_in_epoch)
    max_relaxed_F_beta_test_set_per_epoch = max(
        relaxed_F_beta_test_set_in_epoch)
    avg_relaxed_F_beta_test_set_per_epoch = sum(
        relaxed_F_beta_test_set_in_epoch) / len(
            relaxed_F_beta_test_set_in_epoch)

    return loss, max_MAE_test_set_per_epoch, avg_MAE_test_set_per_epoch, \
        max_F_beta_test_set_per_epoch, avg_F_beta_test_set_per_epoch, \
        max_relaxed_F_beta_test_set_per_epoch, avg_relaxed_F_beta_test_set_per_epoch
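
For reference, these snippets rely on sod_utils.calc_eval_measures to turn one predicted saliency map and its ground truth into the MAE and F_beta values collected above. The repository's implementation is not shown here, so the following is only a minimal sketch assuming the usual salient-object-detection definitions (mean absolute error, and F-measure with beta^2 = 0.3 at a fixed threshold); the name calc_eval_measures_sketch and the threshold argument are illustrative, and the relaxed F_beta that the real helper also returns is not reproduced.

import numpy as np

def calc_eval_measures_sketch(gt, pred, threshold=0.5, beta_sq=0.3, eps=1e-8):
    """Hypothetical stand-in for sod_utils.calc_eval_measures (MAE and F_beta only).

    gt, pred: 2-D float arrays in [0, 1], as produced by the
    .detach().cpu().numpy() calls in the loops above.
    """
    # Mean absolute error between the continuous prediction and the ground truth.
    mae = np.abs(pred - gt).mean()

    # Binarise both maps before computing precision and recall.
    pred_bin = pred >= threshold
    gt_bin = gt >= 0.5

    tp = np.logical_and(pred_bin, gt_bin).sum()
    precision = tp / (pred_bin.sum() + eps)
    recall = tp / (gt_bin.sum() + eps)

    # Standard saliency F-measure with beta^2 = 0.3, which emphasises precision.
    f_beta = (1 + beta_sq) * precision * recall / (beta_sq * precision + recall + eps)
    return mae, f_beta
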
Example no. 2
    avg_relaxed_F_beta_train_set_in_epoch = 0
    relaxed_F_beta_test_set_in_epoch = []
    max_relaxed_F_beta_test_set_in_epoch = 0
    avg_relaxed_F_beta_test_set_in_epoch = 0

    t = tqdm(iter(dataloader), leave=True, total=len(dataloader))

    for idx, data in enumerate(t):
        optimizer.zero_grad()
        guidance, lr, gt = data['guidance'].to(device), data['lr'].to(
            device), data['gt'].to(device)

        out, out_sig = net((guidance, lr))

        loss = h_loss.hybrid_loss_saliency(out_sig, torch.sigmoid(gt))
        loss.backward()
        optimizer.step()
        scheduler.step()  # step the LR scheduler after the optimizer update
        running_loss += loss.item()

        for i in range(gt.size(0)):  # iterate over the batch dimension
            MAE, F_beta, relaxed_F_beta = sod_utils.calc_eval_measures(
                gt[i, 0, :, :].detach().cpu().numpy(),
                out[i, 0, :, :].detach().cpu().numpy())
            MAE_train_set_in_epoch.append(MAE)
            F_beta_train_set_in_epoch.append(F_beta)
            relaxed_F_beta_train_set_in_epoch.append(relaxed_F_beta)

        if idx % max(1, int(len(dataloader) * 0.1)) == 0:  # log roughly ten times per epoch
            loss_for_log = running_loss / (idx + 1)
            t.set_description(
Example no. 3
        max_relaxed_F_beta_train_set_in_epoch = 0
        avg_relaxed_F_beta_train_set_in_epoch = 0
        relaxed_F_beta_test_set_in_epoch = []
        max_relaxed_F_beta_test_set_in_epoch = 0
        avg_relaxed_F_beta_test_set_in_epoch = 0

        t = tqdm(iter(dataloader), leave=True, total=len(dataloader))

        for idx, data in enumerate(t):
            optimizer.zero_grad()
            guidance, gt = data['guidance'].to(device), data['gt'].to(device)

            out, out_sig, out_lr_up, out_lr_up_sig, _ = net(guidance)

            loss_hr = h_loss.hybrid_loss_saliency(out_sig, gt, opt.w_bce,
                                                  opt.w_ssim, opt.w_iou)
            loss_lr = h_loss.hybrid_loss_saliency(out_lr_up_sig, gt, opt.w_bce,
                                                  opt.w_ssim, opt.w_iou)

            loss = opt.w_hr * loss_hr + opt.w_lr * loss_lr
            loss.backward()
            optimizer.step()
            scheduler.step()  # step the LR scheduler after the optimizer update

            running_loss += loss.item()

            for i in range(gt.size(0)):  # iterate over the batch dimension
                MAE, F_beta, relaxed_F_beta = sod_utils.calc_eval_measures(
                    gt[i, 0, :, :].detach().cpu().numpy(),
                    out_sig[i, 0, :, :].detach().cpu().numpy())
                MAE_train_set_in_epoch.append(MAE)
                F_beta_train_set_in_epoch.append(F_beta)
Example no. 4
        max_relaxed_F_beta_train_set_in_epoch = 0
        avg_relaxed_F_beta_train_set_in_epoch = 0
        relaxed_F_beta_test_set_in_epoch = []
        max_relaxed_F_beta_test_set_in_epoch = 0
        avg_relaxed_F_beta_test_set_in_epoch = 0

        t = tqdm(iter(dataloader), leave=True, total=len(dataloader))
        for idx, data in enumerate(t):
            optimizer.zero_grad()
            guidance, lr, gt = data['guidance'].to(device), data['lr'].to(
                device), data['gt'].to(device)

            out, out_sig, _ = net(guidance)

            loss = h_loss.hybrid_loss_saliency(out_sig, gt)
            loss.backward()
            optimizer.step()
            scheduler.step()  # step the LR scheduler after the optimizer update
            running_loss += loss.item()

            for i in range(gt.size(0)):  # iterate over the batch dimension
                MAE, F_beta, relaxed_F_beta = sod_utils.calc_eval_measures(
                    gt[i, 0, :, :].detach().cpu().numpy(),
                    out_sig[i, 0, :, :].detach().cpu().numpy())
                MAE_train_set_in_epoch.append(MAE)
                F_beta_train_set_in_epoch.append(F_beta)
                relaxed_F_beta_train_set_in_epoch.append(relaxed_F_beta)

            if idx % max(1, int(len(dataloader) * 0.1)) == 0:  # log roughly ten times per epoch
                loss_for_log = running_loss / (idx + 1)
                t.set_description(