def plot3d():
    """Create a GIF from 3D image in the training set."""
    for channel in ["ch1", "ch2", "ch3", "ch4", "ch6"]:
        config = default_config()
        config.features = [Feature.image]
        config.channels = [channel]
        config.crop_size = (300, 300)
        config.time_interval_min = 30
        config.num_images = 10

        train_dataset, _, _ = load_data(config=config)
        images = _first_image(train_dataset)

        image_names = []
        for i, image in enumerate(images[0]):
            plt.cla()
            plt.clf()
            plt.axis("off")
            plt.tight_layout()

            # Keep only the first (and only configured) channel for a 2D plot.
            image2d = image[:, :, 0]
            plt.imshow(image2d, cmap="gray")

            name = f"assets/{i}.png"
            plt.savefig(name)

            image_names.append(name)
        make_gif(f"assets/image-3d-{channel}.gif", image_names)
    def setUp(self):
        config = default_config()
        config.error_strategy = dataloader.ErrorStrategy.ignore
        config.features = [dataloader.Feature.target_ghi]

        metadata_loader = metadata.MetadataLoader(file_name=CATALOG_PATH)
        self.dataset = dataloader.create_dataset(
            lambda: metadata_loader.load(A_STATION, A_STATION_COORDINATE),
            config=config)
Example #3
    def config(self) -> dataloader.DataloaderConfig:
        """Configuration."""
        config = default_config()
        config.features = [
            dataloader.Feature.metadata,
            dataloader.Feature.target_ghi,
        ]

        return config
    def config(self) -> dataloader.DataloaderConfig:
        """Configuration."""
        config = default_config()
        config.num_images = 1
        config.features = [
            dataloader.Feature.image, dataloader.Feature.target_ghi
        ]

        return config
Example #5
    def config(self) -> dataloader.DataloaderConfig:
        """Configuration."""
        config = default_config()
        config.num_images = self.num_images
        config.time_interval_min = self.time_interval_min
        config.skip_missing_past_images = True
        config.features = [dataloader.Feature.image]

        return config
Example #6
    def __init__(self):
        loader = MetadataLoader(CATALOG_PATH)

        config = default_config()
        config.error_strategy = dataloader.ErrorStrategy.ignore
        config.features = [dataloader.Feature.target_ghi]

        self.dataset = dataloader.create_dataset(
            lambda: loader.load(STATION, COORDINATES, skip_missing=False),
            config=config)
Example #7
    def config(self) -> dataloader.DataloaderConfig:
        """Configuration."""
        config = default_config()
        config.num_images = self.num_images
        config.ratio = 0.1
        config.time_interval_min = 30
        config.features = [
            dataloader.Feature.image, dataloader.Feature.target_ghi
        ]

        return config
Example #8
    def config(self) -> dataloader.DataloaderConfig:
        """Configuration."""
        config = default_config()
        config.num_images = self.language_model.num_images
        config.time_interval_min = self.language_model.time_interval_min
        config.features = [
            dataloader.Feature.image,
            dataloader.Feature.metadata,
            dataloader.Feature.target_ghi,
        ]

        return config
Example #9
    def __init__(self, num_images):
        self.num_images = num_images

        config = default_config()
        config.features = [dataloader.Feature.image]

        def gen():
            for _ in range(num_images):
                yield Metadata(IMAGE_PATH, "8bits", 10, datetime.now(),
                               COORDINATES)

        self.dataset = dataloader.create_dataset(lambda: gen(), config=config)
    def config(self, dry_run=False) -> dataloader.DataloaderConfig:
        """Configuration."""
        config = default_config()
        config.num_images = 1
        config.ratio = 1
        config.features = [
            dataloader.Feature.target_ghi,
            dataloader.Feature.metadata,
            dataloader.Feature.image,
        ]

        if dry_run:
            config.error_strategy = dataloader.ErrorStrategy.stop
        return config
    def __init__(self, dropout=0.3, crop_size=32):
        """Initialize the autoencoder."""
        super().__init__(NAME_AUTOENCODER)
        self.scaling_image = preprocessing.MinMaxScaling(
            preprocessing.IMAGE_MIN, preprocessing.IMAGE_MAX)
        self.crop_size = crop_size

        self.default_config = default_config()
        self.default_config.num_images = 1
        self.default_config.features = [dataloader.Feature.image]

        num_channels = len(self.default_config.channels)

        self.encoder = Encoder(dropout=dropout)
        self.decoder = Decoder(num_channels, dropout=dropout)
def find_target_ghi_minmax_value(dataset=None):
    """Find the minimum value of target ghi.

    The values are found based on the training dataset.

    Return:
        Tuple with (max_value, min_value)
    """
    if dataset is None:
        config = default_config()
        config.features = [dataloader.Feature.target_ghi]
        dataset, _, _ = load_data(config=config)

    # GHI is non-negative, so 0.0 is a safe initial value for the max reduction,
    # and the resulting max is a safe initial value for the min reduction.
    max_value = dataset.reduce(0.0, _reduce_max)
    min_value = dataset.reduce(max_value, _reduce_min)

    return max_value, min_value
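A hedged usage sketch: the returned pair can be fed to the same `MinMaxScaling` preprocessing used for images in the autoencoder snippet above. The constructor order `(min, max)` is taken from that snippet; how the scaler is then applied depends on the project's `MinMaxScaling` API.

max_ghi, min_ghi = find_target_ghi_minmax_value()
# Build a scaler for target GHI, mirroring MinMaxScaling(IMAGE_MIN, IMAGE_MAX) above.
scaling_ghi = preprocessing.MinMaxScaling(min_ghi, max_ghi)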
    def test_iterate_multiple_times(self):
        config = train.default_config()
        config.error_strategy = dataloader.ErrorStrategy.ignore
        config.features = [dataloader.Feature.target_ghi]
        metadata_loader = metadata.MetadataLoader(file_name=CATALOG_PATH)
        dataset = dataloader.create_dataset(
            lambda: metadata_loader.load(A_STATION, A_STATION_COORDINATE),
            config=config)

        first_run_data = 0
        for _ in dataset:
            first_run_data += 1

        second_run_data = 0
        for _ in dataset:
            second_run_data += 1

        self.assertEqual(first_run_data, second_run_data)
def cache(size, cache_dir):
    """Resize and cache the images of every dataset split on disk."""
    config = default_config()
    config.features = [Feature.image]
    config.crop_size = size

    # Build a cache directory name from the crop size, e.g. (64, 64) -> 64-64.
    config.image_cache_dir = (
        f"{cache_dir}/image_cache_{size}"
        .replace("(", "")
        .replace(",", "")
        .replace(")", "")
        .replace(" ", "-"))

    logger.info(
        f"Caching images with size {size} in dir {config.image_cache_dir}")

    dataset_train, dataset_valid, dataset_test = load_data(
        enable_tf_caching=False, config=config)

    _create_cache("train", dataset_train)
    _create_cache("valid", dataset_valid)
    _create_cache("test", dataset_test)

    os.system(f"tar -cf {config.image_cache_dir}.tar {config.image_cache_dir}")