# Example #1 — score: 0
    def __init__(self, language_model: seq2seq.Seq2Seq = None):
        """Build the layers: spatial 3D convs, pooling, and a dense head.

        Args:
            language_model: Optional sequence model; a fresh ``seq2seq.Gru``
                is created when omitted.
        """
        super().__init__(NAME)

        # Fall back to a GRU-based language model when none is supplied.
        self.language_model = (
            seq2seq.Gru() if language_model is None else language_model
        )

        self.scaling_ghi = preprocessing.min_max_scaling_ghi()

        self.flatten = layers.Flatten()
        self.max_pool = layers.MaxPooling3D((1, 2, 2))

        # Same-padded 3x3 spatial convolutions; kernel depth 1 means no
        # mixing across the first (temporal) axis.
        self.conv1 = layers.Conv3D(
            64, kernel_size=(1, 3, 3), padding="same", activation="relu"
        )
        self.conv2 = layers.Conv3D(
            128, kernel_size=(1, 3, 3), padding="same", activation="relu"
        )
        self.conv3 = layers.Conv3D(
            128, kernel_size=(1, 3, 3), padding="same", activation="relu"
        )

        # Fully-connected head; 4 final outputs.
        self.d1 = layers.Dense(512, activation="relu")
        self.d2 = layers.Dense(256, activation="relu")
        self.d3 = layers.Dense(4)
    def __init__(
        self,
        model: Model,
        predict_ghi=True,
        batch_size=128,
        skip_non_cached=False,
    ):
        """Set up the state of a training session.

        Args:
            model: Model to be trained and evaluated.
            predict_ghi: Whether the targets are GHI values.
            batch_size: Minibatch size used throughout the session.
            skip_non_cached: Skip examples that are missing from the cache.
        """
        mse = losses.MeanSquaredError()

        def rmse(pred, target):
            """Root of the TF mean-squared-error loss."""
            return mse(pred, target) ** 0.5

        self.loss_fn = rmse

        # Session configuration.
        self.model = model
        self.predict_ghi = predict_ghi
        self.batch_size = batch_size
        self.skip_non_cached = skip_non_cached

        self.scaling_ghi = preprocessing.min_max_scaling_ghi()

        # One running-mean metric per dataset split.
        self.metrics = {
            split: tf.keras.metrics.Mean(f"{split} loss", dtype=tf.float32)
            for split in ("train", "valid", "test")
        }

        self.history = History()
    def __init__(self,
                 encoder=None,
                 num_images=6,
                 time_interval_min=30,
                 dropout=0.20):
        """Build the architecture: image encoder, stacked GRUs, dense head.

        Args:
            encoder: Optional pretrained encoder; when omitted, an
                ``autoencoder.Encoder`` loaded with the best weights is used.
            num_images: Number of images per input sequence.
            time_interval_min: Minutes between consecutive images.
            dropout: Dropout rate applied by ``self.dropout``.
        """
        super().__init__(NAME)
        self.num_images = num_images
        self.time_interval_min = time_interval_min

        self.scaling_image = preprocessing.min_max_scaling_images()
        self.scaling_ghi = preprocessing.min_max_scaling_ghi()

        if encoder is not None:
            self.encoder = encoder
        else:
            # Default to the best pretrained autoencoder weights.
            self.encoder = autoencoder.Encoder()
            self.encoder.load(autoencoder.BEST_MODEL_WEIGHTS)

        self.flatten = layers.Flatten()
        self.dropout = layers.Dropout(dropout)

        # Recurrent stack over the encoded image sequence.
        self.gru1 = layers.GRU(1024, return_sequences=True)
        self.gru2 = layers.GRU(512)

        # Linear dense head (no activations); 4 final outputs.
        self.d1 = layers.Dense(512)
        self.d2 = layers.Dense(256)
        self.d3 = layers.Dense(128)
        self.d4 = layers.Dense(4)
    def __init__(self):
        """Build the architecture: three conv steps and a dense head."""
        super().__init__(NAME)
        self.scaling_image = preprocessing.min_max_scaling_images()
        self.scaling_ghi = preprocessing.min_max_scaling_ghi()

        # Progressively wider convolution steps (see _convolution_step).
        self.conv1 = self._convolution_step((5, 5), 64)
        self.conv2 = self._convolution_step((3, 3), 128)
        self.conv3 = self._convolution_step((3, 3), 256)

        self.flatten = Flatten()

        # Dense head with a single scalar output.
        self.d1 = Dense(512, activation="relu")
        self.d2 = Dense(256, activation="relu")
        self.d3 = Dense(1)
# Example #5 — score: 0
    def __init__(self, encoder: autoencoder.Encoder = None):
        """Build the architecture: pretrained encoder plus a dense head.

        Args:
            encoder: Optional pretrained encoder; when omitted, an
                ``autoencoder.Encoder`` loaded with the best weights is used.
        """
        super().__init__(NAME)
        self.scaling_image = preprocessing.min_max_scaling_images()
        self.scaling_ghi = preprocessing.min_max_scaling_ghi()

        if encoder is not None:
            self.encoder = encoder
        else:
            # Default to the best pretrained autoencoder weights.
            self.encoder = autoencoder.Encoder()
            self.encoder.load(autoencoder.BEST_MODEL_WEIGHTS)

        self.flatten = Flatten()

        # Dense head with a single scalar output.
        self.d1 = Dense(512, activation="relu")
        self.d2 = Dense(128, activation="relu")
        self.d3 = Dense(1)
    def __init__(self, num_images=4, time_interval_min=60):
        """Build a C3D-style architecture: five conv blocks and a dense head.

        Each block is Conv3D(s) -> MaxPooling3D -> Dropout -> BatchNorm.

        Args:
            num_images: Number of images per input sequence.
            time_interval_min: Minutes between consecutive images.
        """
        super().__init__(NAME)
        self.num_images = num_images
        self.time_interval_min = time_interval_min
        self.scaling_image = preprocessing.min_max_scaling_images()
        self.scaling_ghi = preprocessing.min_max_scaling_ghi()

        self.inputdropout = layers.Dropout(0.5)

        # Block 1 — pooling only over the spatial axes.
        self.conv1a = layers.Conv3D(64, (3, 3, 3), padding="same")
        self.pool1 = layers.MaxPooling3D(pool_size=(1, 2, 2), padding="same")
        self.dropout1 = layers.Dropout(0.1)
        self.batchnorm1 = layers.BatchNormalization()

        # Block 2.
        self.conv2a = layers.Conv3D(128, (3, 3, 3), padding="same")
        self.pool2 = layers.MaxPooling3D(pool_size=(2, 2, 2), padding="same")
        self.dropout2 = layers.Dropout(0.1)
        self.batchnorm2 = layers.BatchNormalization()

        # Block 3 — two convolutions.
        self.conv3a = layers.Conv3D(256, (3, 3, 3), padding="same")
        self.conv3b = layers.Conv3D(256, (3, 3, 3), padding="same")
        self.pool3 = layers.MaxPooling3D(pool_size=(2, 2, 2), padding="same")
        self.dropout3 = layers.Dropout(0.1)
        self.batchnorm3 = layers.BatchNormalization()

        # Block 4 — two convolutions.
        self.conv4a = layers.Conv3D(512, (3, 3, 3), padding="same")
        self.conv4b = layers.Conv3D(512, (3, 3, 3), padding="same")
        self.pool4 = layers.MaxPooling3D(pool_size=(2, 2, 2), padding="same")
        self.dropout4 = layers.Dropout(0.1)
        self.batchnorm4 = layers.BatchNormalization()

        # Block 5 — two convolutions.
        self.conv5a = layers.Conv3D(512, (3, 3, 3), padding="same")
        self.conv5b = layers.Conv3D(512, (3, 3, 3), padding="same")
        self.pool5 = layers.MaxPooling3D(pool_size=(2, 2, 2), padding="same")
        self.dropout5 = layers.Dropout(0.1)
        self.batchnorm5 = layers.BatchNormalization()

        # Dense head; 4 final outputs.
        # NOTE(review): 1048 and 521 look like typos for 1024/512 — confirm
        # before changing, since fixing them alters saved weight shapes.
        self.flatten = layers.Flatten()
        self.d1 = layers.Dense(1048, activation="relu")
        self.d2 = layers.Dense(521, activation="relu")
        self.d3 = layers.Dense(256, activation="relu")
        self.d4 = layers.Dense(256, activation="relu")
        self.d5 = layers.Dense(4)
# Example #7 — score: 0
    def __init__(
        self,
        encoder=None,
        num_images=6,
        time_interval_min=30,
        dropout=0.25,
        crop_size=64,
    ):
        """Build the architecture: pretrained encoder, 3D convs, dense head.

        Args:
            encoder: Optional pretrained encoder; when omitted, an
                ``autoencoder.Encoder`` loaded with the best weights is used.
            num_images: Number of images per input sequence.
            time_interval_min: Minutes between consecutive images.
            dropout: Dropout rate applied by ``self.dropout``.
            crop_size: Spatial crop size of the input images.
        """
        super().__init__(NAME)
        self.num_images = num_images
        self.time_interval_min = time_interval_min
        self.crop_size = crop_size
        self.scaling_image = preprocessing.min_max_scaling_images()
        self.scaling_ghi = preprocessing.min_max_scaling_ghi()

        if encoder is not None:
            self.encoder = encoder
        else:
            # Default to the best pretrained autoencoder weights.
            self.encoder = autoencoder.Encoder()
            self.encoder.load(autoencoder.BEST_MODEL_WEIGHTS)

        self.flatten = layers.Flatten()
        self.dropout = layers.Dropout(dropout)

        def _conv3d():
            # All four convolution layers share the exact same configuration.
            return layers.Conv3D(
                32, kernel_size=(3, 3, 3), padding="same", activation="relu"
            )

        self.conv1 = _conv3d()
        self.conv2 = _conv3d()
        self.conv3 = _conv3d()
        self.conv4 = _conv3d()

        self.max_pool = layers.MaxPooling3D((2, 2, 2))

        # Linear dense head; 4 final outputs.
        self.d1 = layers.Dense(256)
        self.d2 = layers.Dense(128)
        self.d3 = layers.Dense(4)
    def __init__(self):
        """Build the architecture: stacked 2D convolutions and a dense head."""
        super().__init__(NAME_CLEARSKY_MATHE)
        self.scaling_image = preprocessing.min_max_scaling_images()
        self.scaling_ghi = preprocessing.min_max_scaling_ghi()

        # Two 64-filter convs, dropout, then spatial downsampling.
        self.conv1 = Conv2D(64, kernel_size=(3, 3), activation="relu")
        self.conv2 = Conv2D(64, kernel_size=(3, 3), activation="relu")
        self.dropout2 = Dropout(0.1)
        self.maxpool1 = MaxPooling2D(pool_size=(2, 2))

        # Four progressively wider convs, then a second downsampling.
        self.conv3 = Conv2D(128, kernel_size=(3, 3), activation="relu")
        self.conv4 = Conv2D(128, kernel_size=(3, 3), activation="relu")
        self.conv5 = Conv2D(256, kernel_size=(3, 3), activation="relu")
        self.conv6 = Conv2D(256, kernel_size=(3, 3), activation="relu")
        self.maxpool2 = MaxPooling2D(pool_size=(2, 2))

        # Dense head; 4 final outputs.
        # NOTE(review): 1048 looks like a typo for 1024 — confirm before
        # changing, since fixing it alters saved weight shapes.
        self.flatten = Flatten()
        self.d1 = Dense(1048, activation="relu")
        self.d2 = Dense(512, activation="relu")
        self.d3 = Dense(256, activation="relu")
        self.d4 = Dense(256, activation="relu")
        self.d5 = Dense(4)
def generate_predictions(
    data_loader: tf.data.Dataset, model: tf.keras.Model, pred_count: int
) -> np.ndarray:
    """Generate and return model predictions given the data prepared by a data loader.

    Args:
        data_loader: Dataset yielding tuples of model input(s) plus a target
            tensor as the last element.
        pred_count: Total number of predictions, used only for the progress bar.

    Returns:
        A 2-D array of rescaled GHI predictions (BATCH x SEQ_LENGTH).
    """
    predictions = []
    scaling_ghi = preprocessing.min_max_scaling_ghi()
    # BUG FIX: the label was previously passed as tqdm's positional
    # `iterable` argument, so it was never displayed; `desc=` is correct.
    with tqdm.tqdm(desc="generating predictions", total=pred_count) as pbar:
        for iter_idx, minibatch in enumerate(data_loader.batch(64)):
            # Lazy %-formatting avoids building the string when INFO is off.
            logger.info("Minibatch #%d", iter_idx)
            assert (
                isinstance(minibatch, tuple) and len(minibatch) >= 2
            ), "the data loader should load each minibatch as a tuple with model input(s) and target tensors"
            # Call the model without the target element.
            # NOTE(review): the original comment claimed `training=False` was
            # passed, but it never was — confirm whether the models' call()
            # accepts it before adding (affects dropout/batch-norm behavior).
            pred = model(minibatch[0:-1]).numpy()

            # Rescale the GHI values back to their original range.
            pred = scaling_ghi.original(pred)
            assert (
                pred.ndim == 2
            ), "prediction tensor shape should be BATCH x SEQ_LENGTH"
            predictions.append(pred)
            pbar.update(len(pred))
    return np.concatenate(predictions, axis=0)
# Example #10 — score: 0
 def __init__(self):
     """Build the architecture; only a GHI min-max scaler is needed."""
     super().__init__(NAME)
     # Scaler used to map GHI values to/from the normalized range.
     self.scaling_ghi = preprocessing.min_max_scaling_ghi()
 def setUp(self):
     """Create a Session around a mocked model and a fresh GHI scaler."""
     self.session = Session(mock.Mock())
     self.scaling_ghi = preprocessing.min_max_scaling_ghi()