# --- Example 1 ---
def test_full_res(testdir, outdir, test_transformer, model, load_epoch,
                  size_input):
    """Run single-shot full-resolution inference on every test image.

    Each image is fed to ``model`` whole (zero-padded up to ``size_input``
    when smaller); the per-class score map is saved as a ``.mat`` file in
    ``<outdir>_prob`` and the argmax mask as a 3-channel PNG named
    ``<stem>_mask.png`` in ``outdir``.

    NOTE(review): ``load_epoch`` is accepted but unused in this function --
    presumably kept for signature parity with sibling helpers; confirm.
    """
    import scipy.misc
    import scipy.io
    # Inputs are named "<stem>_sat.*"; output masks become "<stem>_mask.png".
    img_file_suffix, gt_file_suffix = "_sat", "_mask"

    gpu_time = AverageMeter()    # per-file forward-pass time
    data_time = AverageMeter()   # per-file load/transform time
    write_time = AverageMeter()  # per-file result-writing time

    file_list = datasets.make_dataset_test(testdir)

    # Probability maps are written next to the masks in "<outdir>_prob".
    outdir_prob = outdir + "_prob"
    rm_old_mk_new_dir(outdir)
    rm_old_mk_new_dir(outdir_prob)

    model.eval()
    # switch to evaluate mode
    model.cuda()

    time_start = time.time()
    count = 0
    # count processed files

    pad_ = [0] * 4
    # pad_l, pad_r, pad_t, pad_b -- same ordering nn.ConstantPad2d expects
    for count, file_path in enumerate(file_list):
        file_ = datasets.default_loader(file_path, is_target=False)
        input_img = test_transformer(file_).unsqueeze(0)  # add batch dim
        w, h = input_img.size(3), input_img.size(2)
        # Zero-pad any dimension smaller than the network's minimum input,
        # splitting the padding as evenly as possible on both sides.
        if w < size_input:
            pad_[0] = int(round((size_input - w) / 2))
            pad_[1] = size_input - w - pad_[0]
            w = size_input
        if h < size_input:
            pad_[2] = int(round((size_input - h) / 2))
            pad_[3] = size_input - h - pad_[2]
            h = size_input

        if any(pad_):
            mod_pad = nn.ConstantPad2d(padding=pad_, value=0.)
            input_img = mod_pad(input_img).data

        # load binary probability matrix and get max indices
        input_ = input_img
        # volatile=True: legacy (pre-0.4) PyTorch inference mode, no autograd
        input_var = torch.autograd.Variable(input_, volatile=True)

        data_time.update(time.time() - time_start)
        # data loading time

        time_start = time.time()
        # time reset
        output = model(input_var)
        gpu_time.update(time.time() - time_start)
        # computation time
        time_start = time.time()
        # time reset

        out_prob = output.data.cpu().squeeze()  # drop the batch dimension
        # -------------------------------------------------------------------- #
        _, out_max_ind = out_prob.max(0)
        # get ensemble prob max
        # save binary image
        out_img = out_max_ind.numpy()
        # Replicate the mask into 3 channels so it saves as an RGB image.
        out_img = np.stack((out_img, out_img, out_img), axis=-1)

        # Crop the padding back off the outputs, then reset for the next file.
        if any(pad_):
            out_prob = out_prob[:, pad_[2]:out_prob.shape[1] - pad_[3],
                                pad_[0]:out_prob.shape[2] - pad_[1]]
            out_img = out_img[pad_[2]:out_img.shape[0] - pad_[3],
                              pad_[0]:out_img.shape[1] - pad_[1]]
            pad_ = [0] * 4

        # "<stem>_sat<ext>" -> "<stem>_mask.png"
        file_name = os.path.splitext(os.path.basename(file_path))[0]
        file_name = file_name.split(img_file_suffix)[0]
        file_name = file_name + gt_file_suffix + ".png"
        scipy.misc.imsave(os.path.join(outdir, file_name), out_img)
        file_path_mat = os.path.splitext(
            os.path.basename(file_path))[0] + ".mat"
        scipy.io.savemat(os.path.join(outdir_prob, file_path_mat),
                         {'data': out_prob.numpy()})

        write_time.update(time.time() - time_start)
        # result writing time
        time_start = time.time()
        # time reset

        print('File: [{0}]\t'
              'Time {gpu_time.val:.3f} ({gpu_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Write {write_time.val:.3f} ({write_time.avg:.3f})'.format(
                  count + 1,
                  gpu_time=gpu_time,
                  data_time=data_time,
                  write_time=write_time))
# --- Example 2 ---
def test(testdir, outdir, test_transformer, model, load_epoch, stride,
         size_input):
    """Sliding-window inference over every test image.

    Each image is tiled into ``size_input`` x ``size_input`` crops at the
    given ``stride`` (plus a final crop flush with each edge), all crops
    are run through ``model`` in one batch, the raw class scores are summed
    over overlapping crops, and the per-pixel argmax mask is written to
    ``outdir`` under the input file's basename.

    NOTE(review): ``load_epoch`` is accepted but unused in this function --
    presumably kept for signature parity with sibling helpers; confirm.
    """
    import scipy.misc
    gpu_time = AverageMeter()    # per-file forward-pass time
    data_time = AverageMeter()   # per-file load/crop time
    write_time = AverageMeter()  # per-file result-writing time

    file_list = datasets.make_dataset_test(testdir)

    # Start from a clean output directory.
    if os.path.isdir(outdir):
        shutil.rmtree(outdir)
    os.mkdir(outdir)

    model.eval()
    # switch to evaluate mode
    model.cuda()

    time_start = time.time()
    count = 0
    # count processed files

    for file_path in file_list:
        file_ = datasets.default_loader(file_path, is_target=False)
        input_img = test_transformer(file_).unsqueeze(0)  # add batch dim
        w, h = file_.size
        # BUG FIX: range() objects have no append() in Python 3 --
        # materialize the offsets as lists before adding the final
        # edge-aligned crop offset.
        range_w = list(range(0, w - size_input, stride))
        range_w.append(w - size_input)
        range_h = list(range(0, h - size_input, stride))
        range_h.append(h - size_input)
        # Stack every crop into a single batch along dim 0, in a fixed
        # (wi, hi) order that the accumulation loop below mirrors.
        input_ = None
        for wi in range_w:
            for hi in range_h:
                patch = input_img[:, :, hi:hi + size_input,
                                  wi:wi + size_input]
                if input_ is None:
                    input_ = patch
                else:
                    input_ = torch.cat((input_, patch), dim=0)

        # volatile=True: legacy (pre-0.4) PyTorch inference mode, no autograd
        input_var = torch.autograd.Variable(input_, volatile=True)

        data_time.update(time.time() - time_start)
        # data loading time

        time_start = time.time()
        # time reset
        output = model(input_var)
        gpu_time.update(time.time() - time_start)
        # computation time
        time_start = time.time()
        # time reset

        # Raw (pre-softmax) class scores are accumulated; no normalization.
        output = output.data.cpu()

        # Sum scores of overlapping crops into a full-size score map,
        # walking the crops in the same order they were batched above.
        out_prob = torch.zeros(2, h, w)
        count_patch = 0
        for wi in range_w:
            for hi in range_h:
                out_prob[:, hi:hi + size_input,
                         wi:wi + size_input] += output[count_patch, :, :, :]
                count_patch += 1
        _, out_max_ind = out_prob.max(0)
        # get ensemble prob max; save the argmax mask as a binary image
        out_img = out_max_ind.numpy()
        scipy.misc.imsave(os.path.join(outdir, os.path.basename(file_path)),
                          out_img)

        write_time.update(time.time() - time_start)
        # result writing time
        time_start = time.time()
        # time reset

        count += 1
        print('File: [{0}]\t'
              'Time {gpu_time.val:.3f} ({gpu_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Write {write_time.val:.3f} ({write_time.avg:.3f})'.format(
                  count,
                  gpu_time=gpu_time,
                  data_time=data_time,
                  write_time=write_time))
# --- Example 3 ---
def test_batch_form(testdir, outdir, test_transformer, model, load_epoch,
                    stride, size_input, test_batch_size):
    """Sliding-window inference with a bounded batch size.

    Like ``test`` but crops are run through ``model`` in batches of at most
    ``test_batch_size``, and like ``test_full_res`` the image is zero-padded
    up to ``size_input`` when smaller.  Saves the argmax mask as
    "<stem>_mask.png" in ``outdir`` and the summed class scores as a
    ``.mat`` file in "<outdir>_prob".

    NOTE(review): ``load_epoch`` is accepted but unused in this function --
    presumably kept for signature parity with sibling helpers; confirm.
    """
    import scipy.misc
    import scipy.io
    # Inputs are named "<stem>_sat.*"; output masks become "<stem>_mask.png".
    img_file_suffix, gt_file_suffix = "_sat", "_mask"

    gpu_time = AverageMeter()    # per-batch forward-pass time
    data_time = AverageMeter()   # per-batch load/crop time
    write_time = AverageMeter()  # per-file result-writing time

    file_list = datasets.make_dataset_test(testdir)

    outdir_prob = outdir + "_prob"
    rm_old_mk_new_dir(outdir)
    rm_old_mk_new_dir(outdir_prob)

    model.eval()
    # switch to evaluate mode
    model.cuda()

    time_start = time.time()
    count = 0
    # count processed files

    pad_ = [0] * 4
    # pad_l, pad_r, pad_t, pad_b -- same ordering nn.ConstantPad2d expects
    for file_path in file_list:
        file_ = datasets.default_loader(file_path, is_target=False)
        input_img = test_transformer(file_).unsqueeze(0)  # add batch dim
        w, h = input_img.size(3), input_img.size(2)
        # Zero-pad any dimension smaller than the crop size, splitting the
        # padding as evenly as possible on both sides.
        if w < size_input:
            pad_[0] = int(round((size_input - w) / 2))
            pad_[1] = size_input - w - pad_[0]
            w = size_input
        if h < size_input:
            pad_[2] = int(round((size_input - h) / 2))
            pad_[3] = size_input - h - pad_[2]
            h = size_input

        if any(pad_):
            mod_pad = nn.ConstantPad2d(padding=pad_, value=0.)
            input_img = mod_pad(input_img).data

        # BUG FIX: range() objects have no append() in Python 3 --
        # materialize the offsets as lists before adding the final
        # edge-aligned crop offset.
        range_w = list(range(0, w - size_input, stride))
        range_w.append(w - size_input)
        range_h = list(range(0, h - size_input, stride))
        range_h.append(h - size_input)
        # Enumerate crop origins once in a fixed order; the batching and
        # score-scatter loops below both index into this list.
        wh_list = []
        for wi in range_w:
            for hi in range_h:
                wh_list.append((wi, hi))

        # Summed class scores over all (overlapping) crops.
        out_prob = torch.zeros(2, h, w)
        input_ = None
        for i, (wi, hi) in enumerate(wh_list):
            if input_ is None:
                input_ = input_img[:, :, hi:hi + size_input,
                                   wi:wi + size_input]
            else:
                input_ = torch.cat((input_, input_img[:, :, hi:hi + size_input,
                                                      wi:wi + size_input]),
                                   dim=0)

            if ((i + 1) % test_batch_size
                    == 0) or (i + 1
                              == len(wh_list)):  # batch full or list ended
                # volatile=True: legacy (pre-0.4) inference mode, no autograd
                input_var = torch.autograd.Variable(input_, volatile=True)

                data_time.update(time.time() - time_start)
                # data loading time

                time_start = time.time()
                # time reset
                output = model(input_var)
                gpu_time.update(time.time() - time_start)
                # computation time
                time_start = time.time()
                # time reset

                output = output.data.cpu()

                # Scatter each crop's scores back to its origin in out_prob.
                # (wj, hj) deliberately avoids shadowing the enumeration
                # variables (wi, hi) of the outer loop.
                i_st = i - input_.size(0) + 1  # index of this batch's first crop
                for count_patch, (wj, hj) in enumerate(wh_list[i_st:i + 1]):
                    out_prob[:, hj:hj + size_input, wj:wj +
                             size_input] += output[count_patch, :, :, :]

                input_ = None

        # -------------------------------------------------------------------- #
        _, out_max_ind = out_prob.max(0)
        # get ensemble prob max; save the argmax mask as a binary image
        out_img = out_max_ind.numpy()
        # Replicate the mask into 3 channels so it saves as an RGB image.
        out_img = np.stack((out_img, out_img, out_img), axis=-1)

        # Crop the padding back off the outputs, then reset for the next file.
        if any(pad_):
            out_prob = out_prob[:, pad_[2]:out_prob.shape[1] - pad_[3],
                                pad_[0]:out_prob.shape[2] - pad_[1]]
            out_img = out_img[pad_[2]:out_img.shape[0] - pad_[3],
                              pad_[0]:out_img.shape[1] - pad_[1]]
            pad_ = [0] * 4

        # "<stem>_sat<ext>" -> "<stem>_mask.png"
        file_name = os.path.splitext(os.path.basename(file_path))[0]
        file_name = file_name.split(img_file_suffix)[0]
        file_name = file_name + gt_file_suffix + ".png"
        scipy.misc.imsave(os.path.join(outdir, file_name), out_img)
        file_path_mat = os.path.splitext(
            os.path.basename(file_path))[0] + ".mat"
        scipy.io.savemat(os.path.join(outdir_prob, file_path_mat),
                         {'data': out_prob.numpy()})

        write_time.update(time.time() - time_start)
        # result writing time
        time_start = time.time()
        # time reset

        count += 1
        print('File: [{0}]\t'
              'Time {gpu_time.val:.3f} ({gpu_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Write {write_time.val:.3f} ({write_time.avg:.3f})'.format(
                  count,
                  gpu_time=gpu_time,
                  data_time=data_time,
                  write_time=write_time))
# --- Example 4 ---
def regen_dataset(model, in_data_dir, in_gt_dir, out_data_dir, out_gt_dir,
                  sample_per_file, size_input, transform_joint, transform_img,
                  transform_target, transform_reverse, test_batch_size,
                  max_test_per_file, prec_th, rec_th):
    """Mine hard examples: keep random crops the model handles poorly.

    For every (image, ground-truth) pair, random augmented crops are drawn
    and scored with ``get_prec_recall_batch``; crops whose precision is
    below ``prec_th`` AND whose recall is below ``rec_th`` are written to
    ``out_data_dir`` / ``out_gt_dir`` as sequentially numbered PNGs.

    NOTE(review): ``sample_per_file`` and ``size_input`` are accepted but
    unused here -- presumably for signature parity; confirm with callers.
    """
    import scipy.misc
    gpu_time = AverageMeter()    # per-batch forward-pass time
    data_time = AverageMeter()   # per-batch sampling/transform time
    write_time = AverageMeter()  # per-batch result-writing time

    file_list = datasets.make_dataset(in_data_dir, in_gt_dir, do_copy=False)

    rm_old_mk_new_dir(out_data_dir)
    rm_old_mk_new_dir(out_gt_dir)

    model.eval()
    # switch to evaluate mode
    model.cuda()

    time_start = time.time()
    count = 0
    # count of saved samples; doubles as the output file name

    for count_files, file_path in enumerate(file_list):
        sample_tested = 0
        img = datasets.default_loader(file_path[0], is_target=False)
        gt = datasets.default_loader(file_path[1], is_target=True)

        while sample_tested < max_test_per_file:
            # Build a batch of random crops.  Note the batch actually holds
            # test_batch_size + 1 samples: one seed sample plus the loop.
            input_, target = transform_joint(img, gt)
            input_, target = transform_img(input_), transform_target(target)
            input_, target = input_.unsqueeze(0), target.unsqueeze(0)
            for b in range(test_batch_size):
                tmp_input, tmp_target = transform_joint(img, gt)
                tmp_input, tmp_target = transform_img(
                    tmp_input), transform_target(tmp_target)
                tmp_input = tmp_input.unsqueeze(0)
                # BUG FIX: the target needs a batch dim too, mirroring the
                # seed sample above -- torch.cat requires matching ranks.
                tmp_target = tmp_target.unsqueeze(0)
                input_ = torch.cat((input_, tmp_input), dim=0)
                target = torch.cat((target, tmp_target), dim=0)

            sample_tested += test_batch_size

            # volatile=True: legacy (pre-0.4) inference mode, no autograd
            input_var = torch.autograd.Variable(input_, volatile=True)
            target = target.long()

            data_time.update(time.time() - time_start)
            # data loading time

            time_start = time.time()
            # time reset
            # compute output
            output = model(input_var)
            gpu_time.update(time.time() - time_start)
            # computation time
            time_start = time.time()
            # time reset

            prec_batch, recall_batch = get_prec_recall_batch(
                output.data.cpu(), target)

            # BUG FIX: the original referenced undefined names ``prec`` and
            # ``recall`` (NameError); use the values computed just above.
            # Keep samples where BOTH precision and recall fall below their
            # thresholds, i.e. the ones the model does poorly on.
            ind_to_save = (prec_batch.lt(prec_th) *
                           recall_batch.lt(rec_th)).eq(1)
            # binary mask over the batch
            for i in range(ind_to_save.size(0)):
                if ind_to_save[i] == 0:
                    continue

                # Undo normalization and save crop + ground truth as PNGs.
                out_img = transform_reverse(input_[i, :, :, :]).numpy()
                out_img = np.around(out_img * 255).astype(np.uint8)
                out_img = np.transpose(out_img, (1, 2, 0))  # CHW -> HWC
                out_gt = (target[i, :, :].numpy() * 255).astype(np.uint8)

                count += 1
                fname = str(count) + ".png"
                scipy.misc.imsave(os.path.join(out_data_dir, fname), out_img)
                scipy.misc.imsave(os.path.join(out_gt_dir, fname), out_gt)

            write_time.update(time.time() - time_start)
            # result writing time
            time_start = time.time()
            # time reset

            print('File (Tested): [{0}({1})]\t'
                  'Time {gpu_time.val:.3f} ({gpu_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Write {write_time.val:.3f} ({write_time.avg:.3f})'.format(
                      count_files + 1,
                      sample_tested,
                      gpu_time=gpu_time,
                      data_time=data_time,
                      write_time=write_time))