Example No. 1
import os
import sys

# Locate the shared "ml" directory relative to this script and make it
# importable (same layout as the other scripts in this repository)
toolsDir = os.path.dirname(os.path.abspath(__file__))
rootDir = os.path.dirname(toolsDir)
mlDir = os.path.join(rootDir, "ml")
sys.path.append(mlDir)

import pfm

# =============================================================================
argv = sys.argv[1:]
if len(argv) not in (1, 2):
    print("Usage: ./pfm_to_png.py <input.pfm> [exposure]")
    sys.exit(1)

inputPath = os.path.abspath(argv[0])
inputDir = os.path.dirname(inputPath)
inputFilename = os.path.basename(inputPath)
stem, ext = os.path.splitext(inputFilename)
if ext != ".pfm":
    print("Expected a .pfm file as input")
    sys.exit(0)
outFilename = "{}.png".format(stem)
outFilepath = os.path.join(inputDir, outFilename)

# Load the PFM
img = pfm.load(inputPath)

# Autoexposure or manual exposure
if len(argv) == 2:
    exposure = float(argv[1])
else:
    exposure = img.computeAutoexposure()

img.save_png(outFilepath, exposure, 2.2, reverse=True)
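The pfm module itself is not shown on this page, so the exact behavior of save_png is an assumption. Below is a minimal sketch of the kind of tone mapping such a call typically performs (power-of-two exposure scaling, gamma compression, and an optional vertical flip, since PFM rows are usually stored bottom-up); tonemap_to_png is a hypothetical name, not the library's API:

import numpy as np
import imageio

def tonemap_to_png(data, out_path, exposure, gamma, reverse=False):
    # Hypothetical sketch, not the actual pfm.save_png implementation.
    # Scale linear radiance by 2^exposure, then gamma-compress.
    mapped = np.clip(data * (2.0 ** exposure), 0.0, 1.0) ** (1.0 / gamma)
    if reverse:
        # PFM commonly stores rows bottom-up; flip to PNG's top-down order
        mapped = mapped[::-1]
    imageio.imwrite(out_path, (mapped * 255.0).astype(np.uint8))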
Example No. 2
    def __getitem__(self, idx):

        datum = self.data_list[idx]

        dirname = datum["directory"]
        x = datum["x"]
        y = datum["y"]
        log_normalization = datum["log_normalization"]
        sqrt_normalization = datum["sqrt_normalization"]
        aug = datum["aug"]

        # Generate file names
        p_name, d_name, n_name, z_name = generate_pfm_filenames(dirname, x, y)

        # Load PFM files
        p_pfm = pfm.load(p_name)
        d_pfm = pfm.load(d_name)
        n_pfm = pfm.load(n_name)
        z_pfm = pfm.load(z_name)

        thePfms = [p_pfm, d_pfm, n_pfm, z_pfm]

        # Data augmentation ---------------------------------------------------
        iispt_transforms.augmentList(thePfms, aug)

        # Data transformations ------------------------------------------------

        # Transform P
        p_pfm.normalize_intensity_downstream_half()

        # Transform D
        dmean = d_pfm.normalize_intensity_downstream_full()

        # Transform N
        n_pfm.normalize(-1.0, 1.0)
        if ABLATE_NORMALS:
            n_pfm.clear()

        # Transform Z
        z_pfm.normalize_distance_downstream_full()
        if ABLATE_DISTANCE:
            z_pfm.clear()

        # Convert from numpy to tensors and create results
        # Convert from numpy arrays to tensors and assemble the sample
        result = {
            # Network input/target tensors
            "p": torch.from_numpy(pfm_to_conv_np_array(p_pfm)).float(),
            "t": torch.from_numpy(
                concatenate_conv_np_arrays(d_pfm, n_pfm, z_pfm)).float(),
            # Bookkeeping for later un-normalization and inspection
            "p_name": p_name,
            "d_name": d_name,
            "n_name": n_name,
            "z_name": z_name,
            "mean": dmean,
            "aug": aug,
        }

        return result
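Since __getitem__ returns a dict of tensors plus bookkeeping fields, a dataset like this plugs directly into a torch DataLoader. A usage sketch, where dataset and the batch size are placeholders:

from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=16, shuffle=True)
for batch in loader:
    inputs = batch["t"]   # stacked D/N/Z input maps, shape (B, C, H, W)
    targets = batch["p"]  # normalized P maps
    # string fields such as batch["p_name"] are collated into lists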
Example No. 3
import pfm


def process(fp):
    """Flip a PFM image vertically, overwriting the file in place."""
    img = pfm.load(fp)
    img.flipY()
    img.save_pfm(fp)
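A hypothetical driver for process, flipping every .pfm file under a directory tree in place ("renders" is a placeholder path):

import os

for root, _, files in os.walk("renders"):  # placeholder directory
    for name in files:
        if name.endswith(".pfm"):
            process(os.path.join(root, name))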
Example No. 4
import os
import sys

rootdir = os.path.abspath(os.path.join(__file__, "..", ".."))
mldir = os.path.join(rootdir, "ml")
sys.path.append(mldir)

import pfm

argv = sys.argv[1:]
if len(argv) != 2:
    print("Usage: {} <input.pfm> <output.png>".format(sys.argv[0]))
    sys.exit(1)

inputPath, outputPath = argv

img = pfm.load(inputPath)
img.jacobian_transform()
img.save_png(outputPath, 0.0, 1.8)
Example No. 5
    def load_scene(self, index):
        """
        Loads one scene

        :param index: scene index in range(0, len(dataset))
        :type index: int
        """
        import skimage.io

        scene = self.scenes[index]
        files = [f.name for f in os.scandir(scene)]
        imgs = [
            f for f in files
            if (f.endswith('.png') or f.endswith('.jpg') or f.endswith('.jpeg')
                ) and 'normals' not in f and 'mask' not in f
            and 'objectids' not in f and 'unused' not in f
        ]
        imgs.sort()

        # compute indices of the cross setup
        # (assumes a square grid of views, i.e. w == h)
        w, h = self.nviews
        us = [int(h / 2) * w + i for i in range(h)]  # center row (horizontal)
        vs = [int(w / 2) + w * i for i in range(h)]  # center column (vertical)

        # load the horizontal (center-row) views
        h_views = []
        for i in us:
            fname = os.path.join(scene, imgs[i])
            h_views.append(
                skimage.img_as_float(skimage.io.imread(fname)).astype(
                    np.float32))
        h_views = np.stack(h_views)
        h_views = h_views.transpose((0, 3, 1, 2))  # NHWC -> NCHW

        # load the vertical (center-column) views
        v_views = []
        for i in vs:
            fname = os.path.join(scene, imgs[i])
            v_views.append(
                skimage.img_as_float(skimage.io.imread(fname)).astype(
                    np.float32))
        v_views = np.stack(v_views)
        v_views = v_views.transpose((0, 3, 1, 2))  # NHWC -> NCHW

        # extract center view
        center = v_views[int(h / 2)].copy()

        # try to find the ground truth disparity pfm file
        pfms = [f for f in files if f.endswith('.pfm')]

        if len(pfms) > 1:
            # only load files with 'disp' in the name
            pfms = [f for f in pfms if 'disp' in f]
        if len(pfms) > 1:
            # only load lowres file
            pfms = [f for f in pfms if 'lowres' in f]
        if len(pfms) > 1:
            # only load center view
            pfms = [f for f in pfms if str(us[int(w / 2)]).zfill(3) in f]

        # load ground truth disparity
        gt = np.zeros_like(center[0])
        if len(pfms) > 0:
            gt = pfm.load(os.path.join(scene, pfms[0]))
            gt = np.flip(gt, 0).copy()

        index = np.atleast_1d(index)

        return h_views, v_views, center, gt, index
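To make the cross-setup index arithmetic concrete: for a hypothetical square 9x9 grid of views stored row-major (view (row, col) at index row * w + col), us walks the center row and vs the center column, and the two lists intersect at the center view:

w, h = 9, 9
us = [int(h / 2) * w + i for i in range(h)]
vs = [int(w / 2) + w * i for i in range(h)]
print(us)  # [36, 37, 38, 39, 40, 41, 42, 43, 44]  (row 4)
print(vs)  # [4, 13, 22, 31, 40, 49, 58, 67, 76]   (column 4)
# Both lists contain index 40, the center view of the grid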
Example No. 6
import os
import sys

# Make the shared "ml" directory importable
toolsDir = os.path.dirname(os.path.abspath(__file__))
rootDir = os.path.dirname(toolsDir)
mlDir = os.path.join(rootDir, "ml")
sys.path.append(mlDir)

import pfm
import scipy.stats
import numpy

argv = sys.argv[1:]

referencePath = os.path.abspath(argv[0])
testPath = os.path.abspath(argv[1])
print("Reference {}".format(referencePath))
print("Test {}".format(testPath))

referenceImg = pfm.load(referencePath)
testImg = pfm.load(testPath)
ssimValue = testImg.computeStructuralSimilarity(referenceImg)
print("SSIM {}".format(ssimValue))

# Compute entropy on the test image (scipy.stats.entropy normalizes the
# flattened pixel values into a probability distribution first)
data = testImg.data.flatten()
entropy = scipy.stats.entropy(data)
print("Entropy {}".format(entropy))

# Compute SNR, estimated here as mean / standard deviation of the pixels
mean = numpy.mean(data)
std = numpy.std(data)
print("SNR {}".format(mean / std))
Example No. 7

def main():

    # Load dataset
    trainset, testset = iispt_dataset.load_dataset(config.testset, 0.0)

    selected_set = testset
    selected_set_len = len(selected_set)

    # Load model
    net = iispt_net.IISPTNet()
    net.load_state_dict(torch.load(config.model_path))
    # Put in eval mode
    net.eval()
    print("Model loaded")

    # Statistics accumulators
    statLowL1 = []
    statLowSs = []
    statGaussianL1 = []
    statGaussianSs = []
    statResultL1 = []
    statResultSs = []

    # Loop for each test example
    print("Processing {} items".format(selected_set_len))
    for i in range(selected_set_len):

        if i % 100 == 0:
            print("Processing index {}".format(i))

        item = selected_set[i]
        aug = item["aug"]
        if aug != 0:
            # Only process un-augmented samples
            continue
        item_input = item["t"]
        item_input = item_input.unsqueeze(0)

        # Run the network on the data
        input_variable = Variable(item_input)
        result = net(input_variable)

        resultImg = pfm.loadFromConvOutNpArray(result.data.numpy()[0])
        resultImg.normalize_intensity_upstream(item["mean"])

        expectedImg = pfm.load(item["p_name"])

        lowImg = pfm.load(item["d_name"])

        # Normalize the maps according to their mean for better statistics
        resultImg.divideMean()
        expectedImg.divideMean()
        lowImg.divideMean()

        gaussianImg = lowImg.makeCopy()
        gaussianImg.gaussianBlur(1.0)

        # Compute metrics on 1SPP
        lowL1 = lowImg.computeL1Loss(expectedImg)
        lowSs = lowImg.computeStructuralSimilarity(expectedImg)

        # Compute metrics on blurred
        gaussianL1 = gaussianImg.computeL1Loss(expectedImg)
        gaussianSs = gaussianImg.computeStructuralSimilarity(expectedImg)

        # Compute metrics on NN predicted
        resultL1 = resultImg.computeL1Loss(expectedImg)
        resultSs = resultImg.computeStructuralSimilarity(expectedImg)

        # Record statistics
        statLowL1.append(lowL1)
        statLowSs.append(lowSs)
        statGaussianL1.append(gaussianL1)
        statGaussianSs.append(gaussianSs)
        statResultL1.append(resultL1)
        statResultSs.append(resultSs)

    print("Statistics collection completed")

    # To numpy
    statLowL1 = numpy.array(statLowL1)
    statLowSs = numpy.array(statLowSs)
    statGaussianL1 = numpy.array(statGaussianL1)
    statGaussianSs = numpy.array(statGaussianSs)
    statResultL1 = numpy.array(statResultL1)
    statResultSs = numpy.array(statResultSs)

    plot(statLowL1, statGaussianL1, statResultL1, "L1", -0.1, 1.6)
    plot(statLowSs, statGaussianSs, statResultSs, "Structural Similarity", -0.1, 1.0)

    # Kruskal-Wallis p-values: Gaussian-blurred vs. predicted
    t, p = scipy.stats.kruskal(statGaussianL1, statResultL1)
    print("P value L1 gaussian-predicted {}".format(p))

    t, p = scipy.stats.kruskal(statGaussianSs, statResultSs)
    print("P value Ss gaussian-predicted {}".format(p))

    # Kruskal-Wallis p-values: 1SPP (low) vs. predicted
    t, p = scipy.stats.kruskal(statLowL1, statResultL1)
    print("P value L1 low-predicted {}".format(p))

    t, p = scipy.stats.kruskal(statLowSs, statResultSs)
    print("P value Ss low-predicted {}".format(p))
Example No. 8
def main():

    # Load dataset
    trainset, testset = iispt_dataset.load_dataset(config.testset, 0.0)

    selected_set = testset
    selected_set_len = len(selected_set)

    # Load model
    net = iispt_net.IISPTNet()
    net.load_state_dict(torch.load(config.model_path))
    # Put in eval mode
    net.eval()
    print_force("#LOADCOMPLETE {}".format(selected_set_len))

    # Read requested indices from stdin, one per line
    for line in sys.stdin:
        line = line.rstrip("\n")

        idx = int(line)
        print_force("Requesting index {}".format(idx))

        datum = selected_set.get_datum(idx)
        if datum is None:
            print_force("Out of range!")
            continue
        item = selected_set[idx]
        item_input = item["t"]
        item_input = item_input.unsqueeze(0)
        item_expected = item["p"]
        aug = item["aug"]

        # Run the network on the data
        input_variable = Variable(item_input)
        result = net(input_variable)

        # Convert the network output back to a PFM image
        result_image = pfm.loadFromConvOutNpArray(result.data.numpy()[0])
        # Undo the downstream normalization using the stored mean
        result_image.normalize_intensity_upstream(item["mean"])

        # Save the expected result
        expected_image = pfm.load(item["p_name"])
        iispt_transforms.augmentList([expected_image], aug)
        expectedExposure = expected_image.computeAutoexposure()
        expected_image.save_png("interactiveExpected.png", expectedExposure,
                                GAMMA)

        # Save the created result
        result_image.save_png("interactiveResult.png", expectedExposure, GAMMA)

        # Save the normals map
        normalsImage = pfm.load(item["n_name"])
        iispt_transforms.augmentList([normalsImage], aug)
        normalsImage.save_png("interactiveNormals.png",
                              normalsImage.computeAutoexposure(), GAMMA)

        # Save the distance map
        distanceImage = pfm.load(item["z_name"])
        iispt_transforms.augmentList([distanceImage], aug)
        distanceImage.save_png("interactiveDistance.png",
                               distanceImage.computeAutoexposure(), GAMMA)

        # Save 1SPP path
        lowSamplesImage = pfm.load(item["d_name"])
        iispt_transforms.augmentList([lowSamplesImage], aug)
        lowSamplesImage.save_png("interactiveLow.png", expectedExposure, GAMMA)

        # Make gaussian blur of 1SPP
        gaussianBlurred = lowSamplesImage.makeCopy()
        gaussianBlurred.gaussianBlur(1.0)
        gaussianBlurred.save_png("interactiveBlurred.png", expectedExposure,
                                 GAMMA)

        # Compute metrics on the 1SPP path
        lowSamplesL1 = lowSamplesImage.computeL1Loss(expected_image)
        lowSamplesSs = lowSamplesImage.computeStructuralSimilarity(
            expected_image)
        print_force("#LOWL1 {}".format(lowSamplesL1))
        print_force("#LOWSS {}".format(lowSamplesSs))

        # Compute metrics on blurred
        gaussianBlurredL1 = gaussianBlurred.computeL1Loss(expected_image)
        gaussianBlurredSs = gaussianBlurred.computeStructuralSimilarity(
            expected_image)
        print_force("#GAUSSL1 {}".format(gaussianBlurredL1))
        print_force("#GAUSSSS {}".format(gaussianBlurredSs))

        # Compute metrics on NN predicted
        resultL1 = result_image.computeL1Loss(expected_image)
        resultSs = result_image.computeStructuralSimilarity(expected_image)
        print_force("#RESL1 {}".format(resultL1))
        print_force("#RESSS {}".format(resultSs))

        # Output filename
        print_force("#NAME {}".format(item["p_name"]))

        print_force("#EVALUATECOMPLETE")
Example No. 9
        if bindex >= 0:
            buckets[bindex] += 1

    # Print buckets
    for start, count in zip(buckets_starts, buckets):
        print("{} - {}".format(start, count))

    # Plot
    data = [go.Bar(x=buckets_starts, y=buckets)]
    plotly.offline.plot({"data": data, "layout": go.Layout(title=plotname)})


# Generate histogram for raw data
standard_imgs = []
for fpath in flist:
    standard_imgs.append(pfm.load(fpath))
histogram(standard_imgs, "Raw intensity")

# Generate histogram after log transform
log_imgs = []
for fpath in flist:
    img = pfm.load(fpath)
    img.map(iispt_transforms.LogTransform())
    log_imgs.append(img)
histogram(log_imgs, "Log transform")

# Generate histogram after log + gamma transform
lg_imgs = []
for fpath in flist:
    img = pfm.load(fpath)
    img.normalize_log_gamma(NORMALIZATION_INTENSITY, GAMMA_VALUE)
    lg_imgs.append(img)
histogram(lg_imgs, "Log + gamma transform")
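Only the tail of the histogram helper appears at the top of this example. A hedged reconstruction of what a compatible implementation could look like (the numpy-based fixed-width bucketing is an assumption; the original bins by hand):

import numpy as np
import plotly
import plotly.graph_objs as go

def histogram(imgs, plotname, nbuckets=50):
    # Assumed sketch: pool all pixel values, bucket them, bar-plot the counts
    values = np.concatenate([img.data.flatten() for img in imgs])
    buckets, edges = np.histogram(values, bins=nbuckets)
    buckets_starts = edges[:-1]
    data = [go.Bar(x=buckets_starts, y=buckets)]
    plotly.offline.plot({"data": data, "layout": go.Layout(title=plotname)})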