Example #1
def load_standard_abs_error(indicator_path, gt_path, est_path, unc_path,
                            model_names):
    model_names = ['ConfMap_CVA-Net_' + x + '.pfm' for x in model_names]
    file_list = sorted(os.listdir(unc_path))
    file_list = [file + '.png' for file in file_list]

    error_abs_good, error_abs_hard = [], []
    unc_good = [[] for _ in model_names]
    unc_hard = [[] for _ in model_names]

    for img_path in file_list:
        indi = read(indicator_path + img_path)
        gt = read(gt_path + img_path).astype(float)
        est = read(est_path + img_path).astype(float)
        error_abs = np.abs(gt - est)
        index_good = np.where(indi == 1)
        index_hard = np.where(np.logical_and(indi == 0, gt != 0))
        error_abs_good.extend(error_abs[index_good])
        error_abs_hard.extend(error_abs[index_hard])
        for i, model_name in enumerate(model_names):
            unc = read(unc_path + img_path.replace('.png', '/') + model_name)
            unc_good[i].extend(unc[index_good])
            unc_hard[i].extend(unc[index_hard])
        # break

    error_abs_standard_good = np.array(error_abs_good) / np.array(unc_good)
    error_abs_standard_hard = np.array(error_abs_hard) / np.array(unc_hard)

    return error_abs_standard_good, error_abs_standard_hard
    def create_cost_volume(self, sample):
        image_left = image_io.read(sample.left_image_path)
        image_right = image_io.read(sample.right_image_path)

        cm = census_metric.CensusMetric(5, 5)
        census_left = cm.__create_census_trafo__(image_left)
        census_right = cm.__create_census_trafo__(image_right)
        return cm.__compute_cost_volume__(census_left, census_right, self.cost_volume_depth)
    def training_samples_from_GT(self,
                                 sample_name,
                                 gt_path,
                                 step_size,
                                 offset,
                                 est_path=None,
                                 indicator_path=None):
        """ Creates a set of sample IDs based on a specified reference disparty map.

        Based on the specified step size and offset, the reference disparity map is sampled and for every pixel
        with a reference disparity available one sample ID is created.

        @param sample_name: Name of the current sample (e.g. left image path, cost volume path)
        @param gt_path: Path of the reference disparity map
        @param step_size: Specifies the distance between two sample points
        @param offset: Specifies the offset of the first sample point from the image origin
        @return: A list of sample IDs and the normalised reference disparity map
        """

        # Read ground truth and normalise values if necessary
        disp_gt = image_io.read(gt_path)
        dimensions = disp_gt.shape

        # Read region indicator
        if indicator_path:
            mask_indicator = image_io.read(indicator_path)

        # Read estimated disparity if necessary
        if est_path:
            disp_est = image_io.read(est_path)

        # Ensure that there are no non-finite values such as -inf or inf
        disp_gt[disp_gt == -np.inf] = 0
        disp_gt[disp_gt == np.inf] = 0

        # Check for available ground truth points
        training_samples = []
        indicator = bool(indicator_path)
        print('using indicators' if indicator else 'not using indicators')
        for row in range(offset[0], dimensions[0], step_size[0]):
            for col in range(offset[1], dimensions[1], step_size[1]):
                gt_value = disp_gt[row][col]
                if 0 < gt_value < self.dim[2]:
                    # Ground truth point is available -> Create sample for this pixel
                    est_value = disp_est[row][col] if est_path else None
                    indicator_value = mask_indicator[row][col] if indicator_path else 1
                    training_samples.append(
                        TrainingSample(sample_name,
                                       row,
                                       col,
                                       gt_value,
                                       est_value,
                                       indicator=indicator,
                                       indicator_value=indicator_value))

        return training_samples, disp_gt
    def create_image_dict(self, data_samples):
        """ Loads images based on provided file list and stores them in a dictionary.

        @param data_samples: List containing data samples
        @return: A dictionary with the left image path as key and [left image, right image] as data
        """

        image_dict = {}
        for data_sample in data_samples:
            # Load and store left and right image
            left_image = image_io.read(data_sample.left_image_path)
            right_image = image_io.read(data_sample.right_image_path)
            image_dict[data_sample.left_image_path] = [left_image, right_image]
        return image_dict
    def create_cv_dict(self, data_samples):
        """ Loads all cost volumes to memory.

        @param data_samples: List of data samples to load.
        @return: A dictionary containing all specified cost volumes with their path as key attribute.
        """

        cv_dict = {}
        for data_sample in data_samples:

            # Load and store cost volume
            cv_path = data_sample.cost_volume_path
            cv = cost_volume.CostVolume()
            if cv_path[-3:] == 'bin':
                # To get the cost volume dimensions the ground truth disparity map is used
                disp_gt = image_io.read(data_sample.gt_path)
                cv.load_bin(cv_path, disp_gt.shape[0], disp_gt.shape[1],
                            data_sample.cost_volume_depth)

            elif cv_path[-3:] == 'dat':
                cv.load_dat(cv_path)

            else:
                with open(cv_path, 'rb') as file:
                    cv = pickle.load(file)

            if data_sample.cost_volume_depth > self.dim[2]:
                cv.reduce_depth(self.dim[2])

            # Normalise the cost volume
            cv.normalise(self.cv_norm[0], self.cv_norm[1])
            print('sample cv max: ', np.max(cv.get_data()))
            print('sample cv min: ', np.min(cv.get_data()))
            cv_dict[cv_path] = cv
        return cv_dict
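A minimal usage sketch for training_samples_from_GT above; the dataset object and the file paths are placeholders assumed for illustration, not taken from the source:

# Hypothetical usage: sample every 4th pixel of the reference disparity map,
# starting at the image origin (the dataset object and paths are placeholders).
samples, disp_gt = dataset.training_samples_from_GT(
    sample_name='000000_10',
    gt_path='data/disp_occ/000000_10.png',
    step_size=(4, 4),   # distance between two sample points (rows, cols)
    offset=(0, 0),      # offset of the first sample point from the image origin
    est_path=None,
    indicator_path=None)
print(len(samples), 'training samples created')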
Example #6
    def select_image(self):
        path = filedialog.askopenfilename()
        self.last_path = path
        if len(path) > 0:
            return image_io.read(path)
        else:
            print('Invalid image')
            return None
    def load_standard_abs_error(self):
        model_name = 'ConfMap_CVA-Net_' + self.model_name + '.pfm'
        file_list = sorted(os.listdir(self.unc_path))
        file_list = [file + '.png' for file in file_list]

        error_abs_good, unc_good, gt_good, est_good = [], [], [], []
        error_abs_hard, unc_hard, gt_hard, est_hard = [], [], [], []

        for img_path in file_list:
            indi = read(self.indicator_path + img_path)
            gt = read(self.gt_path + img_path).astype(float)
            est = read(self.est_path + img_path).astype(float)
            error_abs = np.abs(gt - est)
            unc = read(self.unc_path + img_path.replace('.png', '/') +
                       model_name)
            index_good = np.where(indi == 1)
            index_hard = np.where(np.logical_and(indi == 0, gt != 0))
            # data of good region
            error_abs_good.extend(error_abs[index_good])
            unc_good.extend(unc[index_good])
            gt_good.extend(gt[index_good])
            est_good.extend(est[index_good])
            # data of hard region
            error_abs_hard.extend(error_abs[index_hard])
            unc_hard.extend(unc[index_hard])
            gt_hard.extend(gt[index_hard])
            est_hard.extend(est[index_hard])
            # break

        error_abs_standard_good = np.array(error_abs_good) / np.array(unc_good)
        error_abs_standard_hard = np.array(error_abs_hard) / np.array(unc_hard)

        data_good = np.column_stack(
            [error_abs_standard_good, gt_good, est_good, unc_good])
        data_hard = np.column_stack(
            [error_abs_standard_hard, gt_hard, est_hard, unc_hard])

        return data_good, data_hard
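A short usage sketch for load_standard_abs_error above; the evaluator instance is an assumed object carrying the path and model-name attributes used by the method:

# Hypothetical usage: compare the mean standardised absolute error
# (column 0 of the returned arrays) between the two regions.
data_good, data_hard = evaluator.load_standard_abs_error()
print('good region, mean standardised error:', data_good[:, 0].mean())
print('hard region, mean standardised error:', data_hard[:, 0].mean())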
Example #8
def load_abs_error(indicator_path, gt_path, est_path):
    file_list = sorted(os.listdir(gt_path))

    error_abs_list, error_abs_good, error_abs_hard = [], [], []

    for img_path in file_list:
        gt = read(gt_path + img_path).astype(float)
        img_path = img_path.replace('.pfm', '.png')
        est = read(est_path + img_path).astype(float)
        error_abs = np.abs(gt - est)
        error_abs_list.extend(error_abs[gt != 0])

        if indicator_path:
            indi = read(indicator_path + img_path)
            index_good = np.where(indi == 1)
            index_hard = np.where(np.logical_and(indi == 0, gt != 0))
            # data of good region
            error_abs_good.extend(error_abs[index_good])
            # data of hard region
            error_abs_hard.extend(error_abs[index_hard])

    return np.array(error_abs_list), np.array(error_abs_good), np.array(
        error_abs_hard)
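A minimal usage sketch for load_abs_error; the directory layout below is a placeholder:

# Hypothetical usage: overall and per-region mean absolute disparity error.
err_all, err_good, err_hard = load_abs_error('data/mask_indicator/',
                                             'data/disp_gt/',
                                             'results/disp_est/')
print('MAE all: ', err_all.mean())
print('MAE good:', err_good.mean())
print('MAE hard:', err_hard.mean())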
def calc_mask_pred_acc(model_name,
                       disp_gt_path,
                       mask_gt_path,
                       mask_pred_path,
                       thresh=0.5):
    number_of_corr_list, number_of_total_list = [], []
    TP_list, TN_list, FP_list, FN_list = [], [], [], []
    image_names = sorted(os.listdir(mask_pred_path))
    for image_name in image_names:
        disp_gt = read(os.path.join(disp_gt_path, image_name + '.png'))
        mask_gt = read(os.path.join(mask_gt_path, image_name + '.png'))
        mask_pred = np.load(
            os.path.join(mask_pred_path, image_name, model_name + '.npy'))
        mask_pred_thresh = np.ones_like(mask_pred)
        mask_pred_thresh[mask_pred <= thresh] = 0

        # select only pixels with gt disparities
        gt_index = np.where(disp_gt > 0)
        mask_pred_thresh_valid = mask_pred_thresh[gt_index]
        mask_gt_valid = mask_gt[gt_index]

        # compute accuracy and confusion matrix
        number_of_corr = (mask_pred_thresh_valid == mask_gt_valid).sum()
        TN, FP, FN, TP = confusion_matrix(mask_gt_valid,
                                          mask_pred_thresh_valid).ravel()

        number_of_corr_list.append(number_of_corr)
        number_of_total_list.append(len(mask_gt[gt_index]))
        TN_list.append(TN)
        FP_list.append(FP)
        FN_list.append(FN)
        TP_list.append(TP)

    acc = sum(number_of_corr_list) / sum(number_of_total_list)
    TP_res, FP_res, FN_res, TN_res = sum(TP_list), sum(FP_list), sum(
        FN_list), sum(TN_list)
    return acc, TP_res, FP_res, FN_res, TN_res
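The aggregated confusion-matrix counts returned by calc_mask_pred_acc can be turned into precision and recall; a minimal sketch, assuming sklearn provides confusion_matrix and using placeholder paths and model name:

# calc_mask_pred_acc relies on: from sklearn.metrics import confusion_matrix
acc, TP, FP, FN, TN = calc_mask_pred_acc('mask_model',          # placeholder model name
                                         'data/disp_gt/',
                                         'data/mask_gt/',
                                         'results/mask_pred/')
precision = TP / (TP + FP)
recall = TP / (TP + FN)
print(f'acc={acc:.3f}  precision={precision:.3f}  recall={recall:.3f}')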
    def load_cost_volume(self, sample):
        cv_path = sample.cost_volume_path
        cv = cost_volume.CostVolume()
        if cv_path[-3:] == 'bin':
            img_shape = image_io.read(sample.left_image_path).shape
            cv.load_bin(cv_path, img_shape[0], img_shape[1], self.cost_volume_depth)

        elif cv_path[-3:] == 'dat':
            cv.load_dat(cv_path)

        else:
            with open(cv_path, 'rb') as file:
                cv = pickle.load(file)

        if sample.cost_volume_depth > self.cost_volume_depth:
            cv.reduce_depth(self.cost_volume_depth)

        cv.normalise(self.cv_norm[0], self.cv_norm[1])
        return cv
Example #11
def sift_matcher(img, price_rect):
    logos = ['cablevision', 'itba', 'medicus', 'movistar']
    # logos = ['cablevision']
    max_score = 0
    company = None
    result_img = None
    for logo in logos:
        logo_img = io.read('./images/logos/%s.jpg' % logo)
        res_img, score = sift_comparison(img, logo_img)
        print('%s: %d' % (logo, score))
        if score > max_score:
            max_score = score
            company = logo
            result_img = res_img
    if max_score < 20:
        company = 'Desconocido'

    price = find_price(img, price_rect)

    return [company, price, result_img]
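A hypothetical call of sift_matcher; the document path and the price rectangle are placeholders:

# Match a scanned document against the known logos and read the price
# from the given rectangle.
document = io.read('./images/docs/sample.jpg')
company, price, match_img = sift_matcher(document, price_rect=(100, 200, 300, 240))
print(company, price)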
def normalize(image):
    # Min-max normalise the image to the 8-bit range [0, 255]
    max = image.max()
    min = image.min()
    max_min = max - min
    normal = (image - min) / max_min
    #normal *= 65535
    normal *= 255
    #normal = normal.astype(np.uint16)
    normal = normal.astype(np.uint8)
    return normal, max, min


prefix = INPUT_DIR + "extracted/resized/"
print("prefix =", prefix)
for root, dirs, files in os.walk(prefix):
    files.sort()
    accumulated_name = files.pop(0)
    accumulated_image = image_io.read(prefix + accumulated_name).astype(
        np.float32)
    counter = 1
    for next_name in files:
        next_image = image_io.read(prefix + next_name).astype(np.float32)
        accumulated_image += next_image
        counter += 1
        output_image = accumulated_image / counter
        print("Normalizing", end=' ', flush=True)
        output_image, max, min = normalize(output_image)
        print(max, min)
        print(f"Writting output_{counter-2}.tiff")
        cv.imwrite(f"output_{counter-2}.tiff", output_image)
        if counter > MAX_NUMBER_OF_IMAGES:
            break

print("done")
    def load_est(self, index):
        path = self.results_path + str(index).rjust(6, '0') + '_10' + '/DispMap.png'
        disp_est = image_io.read(path)
        return disp_est

    def load_gt(self, index):
        path = self.data_path + 'disp_occ/' + str(index).rjust(6, '0') + '_10.png'
        disp_gt = image_io.read(path)
        return disp_gt

    def load_indicator(self, index):
        path = self.data_path + 'mask_indicator/' + str(index).rjust(6, '0') + '_10.png'
        mask_indicator = image_io.read(path)
        return mask_indicator
Example #16
def read_image_to(target_panel, path):
    img_array = image_io.read(path)
    img = manager.to_tk_image(img_array)
    target_panel.configure(image=img)
    target_panel.image = img
    return img_array
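A minimal usage sketch for read_image_to; the Tk setup and the image path are assumptions, and the manager helper used inside read_image_to is assumed to be available:

import tkinter as tk

# Hypothetical usage: display an image inside a Tkinter Label.
root = tk.Tk()
panel = tk.Label(root)
panel.pack()
read_image_to(panel, 'images/example.png')
root.mainloop()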
Example #17
import os
import cv2 as cv
import numpy as np
import image_io
#from matplotlib import pyplot as plt

# INPUT_DIR/template.tiff
# INPUT_DIR/originals/*.tiff

#INPUT_DIR = "$HOME/Pictures/jupiter-saturno 2020-12-24-c/"
INPUT_DIR = "/Users/vruiz/Pictures/jupiter-saturno 2020-12-24-c/"
#INPUT_DIR = "./"
EXTENSION = ".tiff"

print("Loading template ... ", end='')
template = image_io.read(INPUT_DIR + 'template' + EXTENSION)
template_Y = cv.cvtColor(template, cv.COLOR_BGR2GRAY).astype(np.float32)
w = template_Y.shape[0]  # note: shape[0] is the number of rows (height)
h = template_Y.shape[1]  # shape[1] is the number of columns (width)
print(f"done (size={template_Y.shape})")

#def normalize(image):
#    max = image.max()
#    min = image.min()
#    max_min = max - min
#    normal = (image - min) / max_min
#    #normal *= 65535
#    #normal = normal.astype(np.uint16)
#    return normal, max, min

prefix = INPUT_DIR + "full_size/"
Example #18
    def read_next_img(self):
        path = img_format.format(self.current_frame)
        img = image_io.read(path)
        self.manager.put_into('original-up', img)
Example #19
def load_file(file_path):
    # Prefer the PNG version; fall back to the PFM file otherwise
    try:
        file = read(file_path + '.png').astype(float)
    except Exception:
        file = read(file_path + '.pfm')
    return file
Example #20
import numpy as np
import cv2 as cv
import os
import astroalign as aa
import image_io

MAX_NUMBER_OF_IMAGES = 500
MAX_CONTROL_POINTS = 300  # Default value 50
DETECTION_SIGMA = 2  # Default value 5
MIN_AREA = 2  # Default value 5
INPUT_DIR = "/Users/vruiz/Pictures/jupiter-saturno 2020-12-24-c/"
EXTENSION = ".tiff"

# Dark frame. This image registration tool assumes that the same dark
# image is used for all of the input images.
dark_image = image_io.read(INPUT_DIR + "dark" + EXTENSION).astype(np.float32)


def normalize(image):
    max = image.max()
    min = image.min()
    max_min = max - min
    normal = (image - min) / max_min
    #normal *= 65535
    normal *= 255
    #normal = normal.astype(np.uint16)
    normal = normal.astype(np.uint8)
    return normal, max, min
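For reference, the min-max normalisation above maps the darkest pixel to 0 and the brightest to 255; a quick check on a synthetic array:

# Quick check of normalize() on a synthetic 2x2 float image.
sample = np.array([[0.0, 0.5], [1.0, 2.0]], dtype=np.float32)
normal, vmax, vmin = normalize(sample)
print(normal)      # [[  0  63] [127 255]] after truncation to uint8
print(vmax, vmin)  # 2.0 0.0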


prefix = INPUT_DIR + "full_size/"
Example #21
import __init__
import image_io
import visualizer
import cv2

IMG_FILE = '../images/TEST.PGM'
img = image_io.read(IMG_FILE)

choosing = False
start = (0, 0)


def click(event, x, y, flags, param):
    global choosing
    if event == cv2.EVENT_LBUTTONDOWN:
        choosing = True
        global start
        start = (x, y)
    elif choosing and event == cv2.EVENT_LBUTTONUP:
        choosing = False
        minX, maxX = (min(start[0], x), max(start[0], x))
        minY, maxY = (min(start[1], y), max(start[1], y))
        total = (maxX - minX) * (maxY - minY)
        # One accumulator per channel (a grayscale image has a single channel)
        if img.ndim == 2:
            totalValue = [0]
        else:
            totalValue = [0, 0, 0]
        for i in range(maxX - minX):
            for j in range(maxY - minY):
                pixel = img[minY + j][minX + i]
                if img.ndim == 2:
                    pixel = [pixel]
                for k in range(len(pixel)):
                    # accumulate the per-channel sum over the selected rectangle
                    # (assumed completion of the loop body)
                    totalValue[k] += int(pixel[k])
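The handler above is typically registered through OpenCV's mouse-callback API; a minimal sketch, with the window name as an assumption:

# Hypothetical wiring of the click handler (window name is a placeholder).
cv2.namedWindow('image')
cv2.setMouseCallback('image', click)
cv2.imshow('image', img)
cv2.waitKey(0)
cv2.destroyAllWindows()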
Example #22
    mask_tex_path = 'mask_textureless/'
    mask_disc_path = 'mask_discont/'
    mask_occ_path = 'mask_occlusions/'
    mask_indicator_path = 'mask_indicator_wo_disc/'
    if not os.path.exists(os.path.join(data_path, mask_tex_path)):
        os.mkdir(os.path.join(data_path, mask_tex_path))
    if not os.path.exists(os.path.join(data_path, mask_disc_path)):
        os.mkdir(os.path.join(data_path, mask_disc_path))
    if not os.path.exists(os.path.join(data_path, mask_indicator_path)):
        os.mkdir(os.path.join(data_path, mask_indicator_path))

    folder = sorted(os.listdir(os.path.join(data_path, mask_occ_path)))

    for img_name in folder:
        # img_name = str(img_idx).rjust(6, '0') + '_10.png'
        gt_disparity = read(data_path + disp_path + img_name)
        left_image = read(data_path + left_image_path + img_name)

        # mask_tex = compute_textureless(left_image, textureless_width, textureless_thresh)
        # write(data_path + mask_tex_path + img_name, mask_tex)

        # mask_disc = compute_disparity_discont(gt_disparity, disp_gap, discont_width)
        # write(data_path + mask_disc_path + img_name, mask_disc)

        mask_tex = read(data_path + mask_tex_path + img_name)
        mask_disc = read(data_path + mask_disc_path + img_name)
        mask_occ = read(data_path + mask_occ_path + img_name)

        mask_indicator = compute_good_region(gt_disparity, mask_tex, None,
                                             mask_occ)
        write(data_path + mask_indicator_path + img_name, mask_indicator)
Example #23
def normalize(image):
    max = image.max()
    min = image.min()
    max_min = max - min
    normal = (image - min) / max_min
    #normal *= 65535
    normal *= 255
    #normal = normal.astype(np.uint16)
    normal = normal.astype(np.uint8)
    return normal, max, min


prefix = INPUT_DIR + "extracted/resized/"
for root, dirs, files in os.walk(prefix):
    files.sort()
    source_name = files.pop(0)
    source_image = image_io.read(prefix + source_name).astype(np.float32)
    #source_image -= dark_image
    counter = 1
    for target_name in files:
        source_image_luma = normalize(
            cv.cvtColor(source_image, cv.COLOR_BGR2GRAY))[0]
        #_, source_image_luma = cv.threshold(
        #    src=source_image_luma,
        #    thresh=95,
        #    maxval=255,
        #    #type=cv.THRESH_OTSU
        #    type=cv.THRESH_BINARY
        #)
        # src, maxValue, adaptiveMethod, thresholdType, blockSize, C
        #source_image_luma = cv.adaptiveThreshold(src=source_image_luma,
        #                                         maxValue=255,