Code example #1
import pytest

from stereomideval.dataset import Dataset
# InvalidSceneName is assumed to live in stereomideval's exceptions module
from stereomideval.exceptions import InvalidSceneName


def test_catch_invalid_scene_name():
    """Test catching invalid scene name"""
    scene_name = "Invalid scene name"
    with pytest.raises(InvalidSceneName):
        InvalidSceneName.validate_scene_list(scene_name,
                                             Dataset.get_scene_list())
    with pytest.raises(InvalidSceneName):
        InvalidSceneName.validate_scene_info_list(
            scene_name, Dataset.get_training_scene_list())
Code example #2
import os
import ssl

from stereomideval.dataset import Dataset
# SceneInfo and DatasetType are assumed to live in stereomideval's structures module
from stereomideval.structures import DatasetType, SceneInfo


def test_2003_disparity():
    """Test 2003 load disparity image"""
    # Disable HTTPS certificate verification for the dataset download
    ssl._create_default_https_context = ssl._create_unverified_context
    DATASET_FOLDER = os.path.join(os.getcwd(), "datasets")  # Path to download datasets
    if not os.path.exists(DATASET_FOLDER):
        os.makedirs(DATASET_FOLDER)
    scene_info = SceneInfo(Dataset.Teddy, DatasetType.imperfect, 1.0)
    scene_name = scene_info.scene_name
    dataset_type = scene_info.dataset_type
    Dataset.download_scene_data(scene_name, DATASET_FOLDER, dataset_type)
    Dataset.load_scene_data(
        scene_name=scene_name, dataset_folder=DATASET_FOLDER,
        dataset_type=dataset_type)
Code example #3
def test_catch_invalid_scene_name():
    """Test valid scene names"""
    with pytest.raises(InvalidSceneName):
        InvalidSceneName.validate_scene_list("INVALID SCENE NAME", Dataset.get_scene_list())
Code example #4
def test_check_valid_scene_names():
    """Test valid scene names"""
    for scene_name in Dataset.get_scene_list():
        assert InvalidSceneName.validate_scene_list(scene_name, Dataset.get_scene_list()) is None
Code example #5
def test_dataset_valid_scene_urls():
    """Test valid url creation for scenes"""
    for scene_name in Dataset.get_scene_list():
        url = Dataset.get_url_from_scene(scene_name)
        assert validators.url(url)
Code example #6
def test_init_dataset():
    """Test initalising Dataset class"""
    Dataset()
Code example #7
"""
This module demonstrates loading data from one scene using the stereomideval module.
"""
import os
from stereomideval.dataset import Dataset

# Path to download datasets
DATASET_FOLDER = os.path.join(os.getcwd(), "datasets")
# Scene name (see here for list of scenes: https://vision.middlebury.edu/stereo/data/scenes2014/)
SCENE_NAME = "Adirondack"
# Display loaded scene to OpenCV window
DISPLAY_IMAGES = False

# Create dataset folder
if not os.path.exists(DATASET_FOLDER):
    os.makedirs(DATASET_FOLDER)

# Download dataset from middlebury servers
# will only download it if it hasn't already been downloaded
print("Downloading data for scene '" + SCENE_NAME + "'...")
Dataset.download_scene_data(SCENE_NAME, DATASET_FOLDER)
# Load scene data from downloaded folder
print("Loading data for scene '" + SCENE_NAME + "'...")
scene_data = Dataset.load_scene_data(SCENE_NAME, DATASET_FOLDER,
                                     DISPLAY_IMAGES)
# Scene data class contains the following data:
left_image = scene_data.left_image
right_image = scene_data.right_image
ground_truth_disp_image = scene_data.disp_image
ndisp = scene_data.ndisp
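
As a quick sanity check after loading, the returned fields can be inspected directly. A minimal sketch, assuming the images are NumPy arrays as returned by OpenCV-based loaders (the attribute names match those used above; everything else is illustrative):

# Sanity check of the loaded data (assumes the images are NumPy arrays)
print("Left image shape:", left_image.shape)
print("Right image shape:", right_image.shape)
print("Ground truth disparity shape:", ground_truth_disp_image.shape)
print("Number of disparities (ndisp):", ndisp)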
Code example #8
"""
This module demonstrates loading data from all scenes using the stereomideval module.
"""
import os
from stereomideval.dataset import Dataset

DATASET_FOLDER = os.path.join(os.getcwd(),
                              "datasets")  #Path to download datasets
DISPLAY_IMAGES = False

# Create dataset folder
if not os.path.exists(DATASET_FOLDER):
    os.makedirs(DATASET_FOLDER)

# Get list of scenes in Middlebury's stereo training dataset and iterate through them
for scene_info in Dataset.get_training_scene_list():
    scene_name = scene_info.scene_name
    dataset_type = scene_info.dataset_type
    # Download dataset from middlebury servers
    # will only download it if it hasn't already been downloaded
    print("Downloading data for scene '" + scene_name + "'...")
    Dataset.download_scene_data(scene_name, DATASET_FOLDER, dataset_type)
    # Load scene data from downloaded folder
    print("Loading data for scene '" + scene_name + "'...")
    scene_data = Dataset.load_scene_data(scene_name=scene_name,
                                         dataset_folder=DATASET_FOLDER,
                                         dataset_type=dataset_type,
                                         display_images=DISPLAY_IMAGES)
    # Scene data class contains the following data:
    left_image = scene_data.left_image
    right_image = scene_data.right_image
Code example #9
import os

from stereomideval.structures import MatchData
from stereomideval.dataset import Dataset
from stereomideval.eval import Eval, Timer

DATASET_FOLDER = os.path.join(os.getcwd(),
                              "datasets")  #Path to download datasets
GET_METRIC_RANK = False  # Compare each match data against online ranking
GET_AV_METRIC_RANK = True  # Compare average results across all scenes against online ranking

# Create dataset folder
if not os.path.exists(DATASET_FOLDER):
    os.makedirs(DATASET_FOLDER)

match_data_list = []
# Get list of scenes in Middlebury's stereo training dataset and iterate through them
for scene_info in Dataset.get_training_scene_list():
    scene_name = scene_info.scene_name
    dataset_type = scene_info.dataset_type
    # Download dataset from middlebury servers
    # will only download it if it hasn't already been downloaded
    print("Downloading data for scene '" + scene_name + "'...")
    Dataset.download_scene_data(scene_name, DATASET_FOLDER, dataset_type)
    # Load scene data from downloaded folder
    print("Loading data for scene '" + scene_name + "'...")
    scene_data = Dataset.load_scene_data(scene_name=scene_name,
                                         dataset_folder=DATASET_FOLDER,
                                         dataset_type=dataset_type)
    # Scene data class contains the following data:
    left_image = scene_data.left_image
    right_image = scene_data.right_image
    ground_truth_disp_image = scene_data.disp_image
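
The example above stops after loading each scene; inside the per-scene loop a typical next step is to produce a test disparity image to compare against the ground truth. The sketch below uses OpenCV's block matcher purely as a stand-in algorithm (it is not part of stereomideval) and assumes the loaded images are 8-bit NumPy arrays:

import cv2

# Convert to grayscale if the loader returned colour images (assumption)
if left_image.ndim == 3:
    left_gray = cv2.cvtColor(left_image, cv2.COLOR_BGR2GRAY)
    right_gray = cv2.cvtColor(right_image, cv2.COLOR_BGR2GRAY)
else:
    left_gray, right_gray = left_image, right_image
# StereoBM returns fixed-point disparities scaled by 16;
# numDisparities must be a multiple of 16
stereo_matcher = cv2.StereoBM_create(numDisparities=16 * 10, blockSize=15)
test_disp_image = stereo_matcher.compute(left_gray, right_gray).astype('float32') / 16.0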
Code example #10
    def get_metric_vals(metric,
                        scene_name,
                        dataset_type=DatasetType.imperfect,
                        dense=True):
        """
        Get list of metrics values from Middlebury website

        Currently assumes using the training set with the nonocc mask

        Parameters:
            metric (Eval.Metric): metric to get values for
            scene_name (string): scene name to get values for
            dataset_type (DatasetType): dataset variant used to select the
                scene's column in the results table
            dense (bool): use the dense results table if True,
                otherwise the sparse table

        Returns:
            alg_names (list(string)): list of algorithm names
            metric_list (list(float)): list of values for the given metric
        """
        # Check scene name is valid
        InvalidSceneName.validate_scene_info_list(
            scene_name, Dataset.get_training_scene_list())
        dense_or_sparse = "dense"
        if not dense:
            dense_or_sparse = "sparse"
        # Get url for table on Middlebury website
        url = WebscrapeMiddlebury.get_table_url("training", dense_or_sparse,
                                                metric, "nonocc", False)
        # disable warnings to avoid message:
        # 'InsecureRequestWarning: Unverified HTTPS request
        # is being made to host 'vision.middlebury.edu'.
        # Adding certificate verification is strongly advised.'
        urllib3.disable_warnings()
        # Load url
        print("Loading results from Middlebury website...")
        page = requests.get(url, verify=False)
        # Parse webpage
        soup = BeautifulSoup(page.content, 'html.parser')
        # Get stereo table from webpage
        table = soup.find('table', {'id': 'stereoTable'})
        # Find table body
        table_body = table.find('tbody')
        # Find table body rows
        table_rows = table_body.findAll('tr')
        # Initialise lists for storing the results of the table
        alg_names_list = []
        metric_list = []
        web_scene_name_suffix = WebscrapeMiddlebury.get_web_scene_name_suffix(
            dataset_type)
        # A missing entry raises an exception in the loop below
        # (e.g. trying to compare an image that is not in the website table)
        # Iterate through the rows in the table
        for table_row in list(table_rows):
            # Find algorithm name in row data
            alg_name = table_row.find('td', {'class': ['algName']})
            # Find metric value in row data
            metric_val = table_row.find('td', {
                "class": [
                    "{} data datasetCol".format(scene_name +
                                                web_scene_name_suffix)
                ]
            },
                                        partial=False)
            # Check if metric value was found
            if metric_val is None:
                # Find metric value with 'firstPlace'
                # (the first row in the data has this extra class definition)
                metric_val = table_row.find('td', {
                    "class": [
                        "{} firstPlace data datasetCol".format(
                            scene_name + web_scene_name_suffix)
                    ]
                },
                                            partial=False)
            # Check metric value and algorithm name were found
            if metric_val is not None and alg_name is not None:
                # Add metric value and algorithm name to lists
                metric_list.append(float(metric_val.string))
                alg_names_list.append(alg_name.string)
            else:
                raise Exception(
                    "Failed to find table data for metric value or algorithm name"
                )
        return alg_names_list, metric_list
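
For context, a hypothetical call site for the method above. It assumes get_metric_vals is exposed as a static method of the WebscrapeMiddlebury class referenced in its body; the Eval.Metric member name is also an assumption, so substitute whichever metric the library actually defines:

# Hypothetical usage (Eval.Metric member name is an assumption)
alg_names, metric_vals = WebscrapeMiddlebury.get_metric_vals(
    metric=Eval.Metric.rms,  # assumed member; use a real Eval.Metric value
    scene_name="Adirondack",
    dataset_type=DatasetType.imperfect,
    dense=True)
for alg_name, metric_val in zip(alg_names, metric_vals):
    print(alg_name, metric_val)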