Example #1
def test_batch_form(testdir, outdir, test_transformer, model, load_epoch,
                    stride, size_input, test_batch_size):
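    """Sliding-window inference: tile each test image into size_input
    patches (stride apart), run the model in mini-batches of
    test_batch_size, accumulate the per-class scores of overlapping
    patches, and save the argmax mask (PNG) and probability map (.mat).
    """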
    import scipy.misc
    import scipy.io
    img_file_suffix, gt_file_suffix = "_sat", "_mask"

    gpu_time = AverageMeter()
    data_time = AverageMeter()
    write_time = AverageMeter()

    file_list = datasets.make_dataset_test(testdir)

    outdir_prob = outdir + "_prob"
    rm_old_mk_new_dir(outdir)
    rm_old_mk_new_dir(outdir_prob)

    model.eval()  # switch to evaluate mode
    model.cuda()

    time_start = time.time()
    count = 0  # number of processed files

    pad_ = [0] * 4  # pad_left, pad_right, pad_top, pad_bottom
    for file_path in file_list:
        file_ = datasets.default_loader(file_path, is_target=False)
        input_img = test_transformer(file_).unsqueeze(0)
        w, h = input_img.size(3), input_img.size(2)
        if w < size_input:
            pad_[0] = int(round((size_input - w) / 2))
            pad_[1] = size_input - w - pad_[0]
            w = size_input
        if h < size_input:
            pad_[2] = int(round((size_input - h) / 2))
            pad_[3] = size_input - h - pad_[2]
            h = size_input

        if any(pad_):
            mod_pad = nn.ConstantPad2d(padding=pad_, value=0.)
            input_img = mod_pad(input_img).data

        # window origins along each axis; append the final offset so the
        # last patch is flush with the image border
        range_w = list(range(0, w - size_input, stride))
        range_w.append(w - size_input)
        range_h = list(range(0, h - size_input, stride))
        range_h.append(h - size_input)
        wh_list = []
        for wi in range_w:
            for hi in range_h:
                wh_list.append((wi, hi))

        # accumulate the two-class scores of overlapping patches
        out_prob = torch.zeros(2, h, w)
        input_ = None
        for i, (wi, hi) in enumerate(wh_list):
            if input_ is None:
                input_ = input_img[:, :, hi:hi + size_input,
                                   wi:wi + size_input]
            else:
                input_ = torch.cat((input_, input_img[:, :, hi:hi + size_input,
                                                      wi:wi + size_input]),
                                   dim=0)

            # run the model when the batch is full or the list is exhausted
            if (i + 1) % test_batch_size == 0 or (i + 1) == len(wh_list):
                input_var = torch.autograd.Variable(input_, volatile=True)

                data_time.update(time.time() - time_start)  # data loading time

                time_start = time.time()  # reset timer
                output = model(input_var)
                gpu_time.update(time.time() - time_start)  # GPU compute time
                time_start = time.time()  # reset timer

                output = output.data.cpu()

                i_st = i - input_.size(0) + 1  # first patch index in this batch
                for count_patch, (wi, hi) in enumerate(wh_list[i_st:i + 1]):
                    out_prob[:, hi:hi + size_input,
                             wi:wi + size_input] += output[count_patch, :, :, :]

                input_ = None

        # ------------------------------------------------------------------ #
        _, out_max_ind = out_prob.max(0)  # argmax over the two classes
        # replicate the binary mask to three channels for saving as RGB
        out_img = out_max_ind.numpy()
        out_img = np.stack((out_img, out_img, out_img), axis=-1)

        if any(pad_):
            out_prob = out_prob[:, pad_[2]:out_prob.shape[1] - pad_[3],
                                pad_[0]:out_prob.shape[2] - pad_[1]]
            out_img = out_img[pad_[2]:out_img.shape[0] - pad_[3],
                              pad_[0]:out_img.shape[1] - pad_[1]]
            pad_ = [0] * 4

        file_name = os.path.splitext(os.path.basename(file_path))[0]
        file_name = file_name.split(img_file_suffix)[0]
        file_name = file_name + gt_file_suffix + ".png"
        scipy.misc.imsave(os.path.join(outdir, file_name), out_img)
        file_path_mat = os.path.splitext(
            os.path.basename(file_path))[0] + ".mat"
        scipy.io.savemat(os.path.join(outdir_prob, file_path_mat),
                         {'data': out_prob.numpy()})

        write_time.update(time.time() - time_start)  # file writing time
        time_start = time.time()  # reset timer

        count += 1
        print('File: [{0}]\t'
              'Time {gpu_time.val:.3f} ({gpu_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Write {write_time.val:.3f} ({write_time.avg:.3f})'.format(
                  count,
                  gpu_time=gpu_time,
                  data_time=data_time,
                  write_time=write_time))
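
A minimal driver sketch for test_batch_form. The stand-in model, the
torchvision normalization, and the data/output paths are assumptions for
illustration; none of them are part of the snippet above.

import torch.nn as nn
import torchvision.transforms as transforms

# stand-in two-class segmentation net (the real project model and its
# checkpoint are not shown in this snippet)
model = nn.Conv2d(3, 2, kernel_size=1)
test_transformer = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

test_batch_form("data/test", "out/masks", test_transformer, model,
                load_epoch=0, stride=256, size_input=512,
                test_batch_size=8)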
Example #2
def test_full_res(testdir, outdir, test_transformer, model, load_epoch,
                  size_input):
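    """Full-resolution inference: pad the image up to size_input if
    needed, run it through the model in a single forward pass, and save
    the argmax mask (PNG) and the raw probability map (.mat).
    """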
    import scipy.misc
    import scipy.io
    img_file_suffix, gt_file_suffix = "_sat", "_mask"

    gpu_time = AverageMeter()
    data_time = AverageMeter()
    write_time = AverageMeter()

    file_list = datasets.make_dataset_test(testdir)

    outdir_prob = outdir + "_prob"
    rm_old_mk_new_dir(outdir)
    rm_old_mk_new_dir(outdir_prob)

    model.eval()  # switch to evaluate mode
    model.cuda()

    time_start = time.time()

    pad_ = [0] * 4  # pad_left, pad_right, pad_top, pad_bottom
    for count, file_path in enumerate(file_list):
        file_ = datasets.default_loader(file_path, is_target=False)
        input_img = test_transformer(file_).unsqueeze(0)
        w, h = input_img.size(3), input_img.size(2)
        if w < size_input:
            pad_[0] = int(round((size_input - w) / 2))
            pad_[1] = size_input - w - pad_[0]
            w = size_input
        if h < size_input:
            pad_[2] = int(round((size_input - h) / 2))
            pad_[3] = size_input - h - pad_[2]
            h = size_input

        if any(pad_):
            mod_pad = nn.ConstantPad2d(padding=pad_, value=0.)
            input_img = mod_pad(input_img).data

        input_var = torch.autograd.Variable(input_img, volatile=True)

        data_time.update(time.time() - time_start)  # data loading time

        time_start = time.time()  # reset timer
        output = model(input_var)
        gpu_time.update(time.time() - time_start)  # GPU compute time
        time_start = time.time()  # reset timer

        out_prob = output.data.cpu().squeeze()
        # ------------------------------------------------------------------ #
        _, out_max_ind = out_prob.max(0)  # argmax over the two classes
        # replicate the binary mask to three channels for saving as RGB
        out_img = out_max_ind.numpy()
        out_img = np.stack((out_img, out_img, out_img), axis=-1)

        if any(pad_):
            out_prob = out_prob[:, pad_[2]:out_prob.shape[1] - pad_[3],
                                pad_[0]:out_prob.shape[2] - pad_[1]]
            out_img = out_img[pad_[2]:out_img.shape[0] - pad_[3],
                              pad_[0]:out_img.shape[1] - pad_[1]]
            pad_ = [0] * 4

        file_name = os.path.splitext(os.path.basename(file_path))[0]
        file_name = file_name.split(img_file_suffix)[0]
        file_name = file_name + gt_file_suffix + ".png"
        scipy.misc.imsave(os.path.join(outdir, file_name), out_img)
        file_path_mat = os.path.splitext(
            os.path.basename(file_path))[0] + ".mat"
        scipy.io.savemat(os.path.join(outdir_prob, file_path_mat),
                         {'data': out_prob.numpy()})

        write_time.update(time.time() - time_start)  # file writing time
        time_start = time.time()  # reset timer

        print('File: [{0}]\t'
              'Time {gpu_time.val:.3f} ({gpu_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Write {write_time.val:.3f} ({write_time.avg:.3f})'.format(
                  count + 1,
                  gpu_time=gpu_time,
                  data_time=data_time,
                  write_time=write_time))
Example #3
def test(testdir, outdir, test_transformer, model, load_epoch, stride,
         size_input):
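    """Sliding-window inference without mini-batching: stack every patch
    of a test image into a single tensor, run one forward pass,
    re-assemble the per-class scores, and save the argmax mask as a PNG.
    """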
    import scipy.misc
    gpu_time = AverageMeter()
    data_time = AverageMeter()
    write_time = AverageMeter()

    file_list = datasets.make_dataset_test(testdir)

    if os.path.isdir(outdir):
        shutil.rmtree(outdir)
    os.mkdir(outdir)

    model.eval()  # switch to evaluate mode
    model.cuda()

    time_start = time.time()
    count = 0  # number of processed files

    for file_path in file_list:
        file_ = datasets.default_loader(file_path, is_target=False)
        input_img = test_transformer(file_).unsqueeze(0)
        w, h = file_.size
        # window origins; append the final offset so the last patch is
        # flush with the border (assumes the image is at least size_input)
        range_w = list(range(0, w - size_input, stride))
        range_w.append(w - size_input)
        range_h = list(range(0, h - size_input, stride))
        range_h.append(h - size_input)
        input_ = None
        for wi in range_w:
            for hi in range_h:
                if input_ is None:
                    input_ = input_img[:, :, hi:hi + size_input,
                                       wi:wi + size_input]
                else:
                    input_ = torch.cat(
                        (input_, input_img[:, :, hi:hi + size_input,
                                           wi:wi + size_input]),
                        dim=0)

        input_var = torch.autograd.Variable(input_, volatile=True)

        data_time.update(time.time() - time_start)  # data loading time

        time_start = time.time()  # reset timer
        output = model(input_var)
        gpu_time.update(time.time() - time_start)  # GPU compute time
        time_start = time.time()  # reset timer

        output = output.data.cpu()
        # optional softmax normalization (disabled):
        # max_output, _ = output.max(1)
        # output.sub_(max_output.unsqueeze(1)).exp_().div_(output.sum(1).unsqueeze(1))

        # accumulate the two-class scores of overlapping patches
        out_prob = torch.zeros(2, h, w)
        count_patch = 0
        for wi in range_w:
            for hi in range_h:
                out_prob[:, hi:hi + size_input,
                         wi:wi + size_input] += output[count_patch, :, :, :]
                count_patch += 1
        _, out_max_ind = out_prob.max(0)  # argmax over the two classes
        out_img = out_max_ind.numpy()  # binary mask image
        scipy.misc.imsave(os.path.join(outdir, os.path.basename(file_path)),
                          out_img)

        write_time.update(time.time() - time_start)  # file writing time
        time_start = time.time()  # reset timer

        count += 1
        print('File: [{0}]\t'
              'Time {gpu_time.val:.3f} ({gpu_time.avg:.3f})\t'
              'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
              'Write {write_time.val:.3f} ({write_time.avg:.3f})'.format(
                  count,
                  gpu_time=gpu_time,
                  data_time=data_time,
                  write_time=write_time))
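
All three examples use the pre-0.4 PyTorch idiom
torch.autograd.Variable(..., volatile=True), and scipy.misc.imsave was
removed in SciPy 1.2. A minimal sketch of the equivalent inference and
save step on current PyTorch, using a stand-in model and imageio (an
assumption; the source does not use it):

import numpy as np
import torch
import torch.nn as nn
import imageio

# stand-in two-class "model" for demonstration only
model = nn.Conv2d(3, 2, kernel_size=1)
model.eval()

input_ = torch.randn(1, 3, 64, 64)  # dummy input patch
with torch.no_grad():  # replaces Variable(..., volatile=True)
    output = model(input_)

# argmax over the class dimension, then save as an 8-bit mask;
# imageio.imwrite stands in for the removed scipy.misc.imsave
out_img = output.argmax(1).squeeze(0).numpy().astype(np.uint8)
imageio.imwrite("mask.png", out_img * 255)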