Example #1
def _evaluate(cfg: CN,
              dataset: Datasets,
              output_folder: Path,
              find_mistakes: bool = False,
              include_heading: bool = False) -> typing.Tuple[int, int]:
    """Count how often the corner detector misses on a rendered dataset.

    Returns a ``(mistakes, total)`` tuple so callers can aggregate counts.
    """
    mistakes = 0
    total = 0
    folder = URI("data://render") / dataset.value
    for img_file in folder.glob("*.png"):
        total += 1
        img = cv2.imread(str(img_file))
        json_file = folder / f"{img_file.stem}.json"
        with json_file.open("r") as f:
            label = json.load(f)
        actual = np.array(label["corners"])

        try:
            predicted = find_corners(cfg, img)
        except Exception:
            # Detection failed entirely; treat this sample as a mistake below
            predicted = None

        if predicted is not None:
            actual = sort_corner_points(actual)
            predicted = sort_corner_points(predicted)

        if predicted is None or np.linalg.norm(actual - predicted,
                                               axis=-1).max() > 10.:
            mistakes += 1
    return mistakes, total
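
Since the function returns raw counts rather than a formatted report, callers can aggregate results across datasets. A minimal usage sketch, assuming an already-loaded `cfg` and a `Datasets.TRAIN` member (both names inferred from the signature above, not verified):

mistakes, total = _evaluate(cfg, Datasets.TRAIN, Path("evaluation_out"))
accuracy = 1 - mistakes / total if total else float("nan")
print(f"{mistakes}/{total} samples failed ({accuracy:.1%} accuracy)")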
Example #2
def train_classifier(name: str):
    """Set up CLI interface for training a classifier.

    Args:
        name (str): the name of the classifier (`"occupancy_classifier"` or `"piece_classifier"`)
    """
    configs_dir = URI("config://") / name

    def _train(config: str):
        cfg = CN.load_yaml_with_base(configs_dir / f"{config}.yaml")
        run_dir = URI("runs://") / name / config

        # Train the model and save it
        train(cfg, run_dir)

    # Read available configs
    configs = [x.stem for x in configs_dir.glob("*.yaml")
               if not x.stem.startswith("_")]

    # Set up argument parser
    parser = argparse.ArgumentParser(description="Train the network.")
    parser.add_argument("--config", help="the configuration to train (default: all)",
                        type=str, choices=configs, default=None)
    args = parser.parse_args()

    # Train
    if args.config is None:
        logger.info("Training all configurations one by one")
        for config in configs:
            _train(config)
    else:
        logger.info(f"Training the {args.config} configuration")
        _train(args.config)
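
A minimal sketch of using this as a script entry point; the `__main__` guard is an assumption, since the project's actual entry points are not shown here:

if __name__ == "__main__":
    train_classifier("occupancy_classifier")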
Example #3
def create_configs(classifier: str, include_centercrop: bool = False):
    """Create the YAML configuration files for all registered models for a classifier.

    Args:
        classifier (str): the classifier (either `"occupancy_classifier"` or `"piece_classifier"`)
        include_centercrop (bool, optional): whether to create two configs per model, one including center crop and one not. Defaults to False.
    """
    config_dir = URI("config://") / classifier

    logger.info(f"Removing YAML files from {config_dir}.")
    for f in config_dir.glob("*.yaml"):
        if not f.name.startswith("_"):
            f.unlink()

    for name, model in MODELS_REGISTRY[classifier.upper()].items():
        for center_crop in ((True, False) if include_centercrop else (False,)):
            config_file = config_dir / \
                (name + ("_centercrop" if center_crop else "") + ".yaml")
            logger.info(f"Writing configuration file {config_file}")

            size = model.input_size
            C = CN()
            override_base = f"config://{classifier}/_base_override_{name}.yaml"
            if URI(override_base).exists():
                C._BASE_ = override_base
            else:
                suffix = "_pretrained" if model.pretrained else ""
                C._BASE_ = f"config://{classifier}/_base{suffix}.yaml"
            C.DATASET = CN()
            C.DATASET.TRANSFORMS = CN()
            C.DATASET.TRANSFORMS.CENTER_CROP = (50, 50) \
                if center_crop else None
            C.DATASET.TRANSFORMS.RESIZE = size
            C.TRAINING = CN()
            C.TRAINING.MODEL = CN()
            C.TRAINING.MODEL.REGISTRY = classifier.upper()
            C.TRAINING.MODEL.NAME = name

            with config_file.open("w") as f:
                C.dump(stream=f)
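
A minimal usage sketch; the model name "resnet" is hypothetical and only illustrates the file-naming scheme implied by the code above:

create_configs("piece_classifier", include_centercrop=True)
# With a registered model named "resnet", this writes two files:
#   config://piece_classifier/resnet.yaml
#   config://piece_classifier/resnet_centercrop.yaml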
Example #4
import argparse
from logging import getLogger

import numpy as np
from recap import URI

logger = getLogger(__name__)

if __name__ == "__main__":
    # The parser defines no arguments; calling parse_args() still provides
    # --help and rejects unexpected command-line arguments.
    argparse.ArgumentParser(
        description="Split the dataset into train/val/test.").parse_args()

    val_split = .03
    test_split = .1
    render_dir = URI("data://render")
    ids = np.array([x.stem for x in render_dir.glob("*.json")])
    if len(ids) == 0:
        logger.warning(
            "No samples found in 'data://render'; either you have not downloaded the dataset yet or it has already been split."
        )
    np.random.seed(42)  # fixed seed so the split is reproducible
    ids = np.random.permutation(ids)
    # np.split expects cumulative boundaries rather than per-split sizes, so
    # accumulate them: val = ids[:v], test = ids[v:v + t], train = ids[v + t:]
    split_points = np.cumsum(
        (np.array([val_split, test_split]) * len(ids)).astype(int))
    val, test, train = np.split(ids, split_points)
    datasets = {"val": val, "test": test, "train": train}
    print(
        f"{len(ids)} samples will be split into {len(train)} train, {len(val)} val, {len(test)} test."
    )

    for dataset_name, ids in datasets.items():
        # The original snippet ends here; the loop body below is an assumed
        # continuation that moves each sample's image/label pair into the
        # subfolder for its split.
        subfolder = render_dir / dataset_name
        subfolder.mkdir(parents=True, exist_ok=True)
        for sample_id in ids:
            for file in (render_dir / f"{sample_id}.png",
                         render_dir / f"{sample_id}.json"):
                if file.exists():
                    file.rename(subfolder / file.name)
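
For reference, a tiny self-contained demonstration of the cumulative boundaries that np.split expects (the numbers are made up):

import numpy as np

ids = np.arange(10)
val, test, train = np.split(ids, np.cumsum([1, 2]))
# val -> [0], test -> [1, 2], train -> [3, 4, 5, 6, 7, 8, 9]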