Code Example #1
File: Autoencoder.py Project: vanstrn/MV
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Autoencoder test method. This implementation runs about 4-8% slower than the base Autoencoding method."""

        self.HPs.update({
            "LearningRate": 0.00005,
            "LatentSize": 64,
            "Optimizer": "Adam",
            "Epochs": 10,
            "BatchSize": 64,
        })

        self.requiredParams.Append([
            "GenNetworkConfig",
            "EncNetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        self.inputSpec = dataset.inputSpec
        networkConfig.update(dataset.outputSpec)
        networkConfig.update({"LatentSize": self.HPs["LatentSize"]})
        self.Generator = CreateModel(self.HPs["GenNetworkConfig"],
                                     {"latent": self.HPs["LatentSize"]},
                                     variables=networkConfig,
                                     printSummary=True)
        self.Encoder = CreateModel(self.HPs["EncNetworkConfig"],
                                   dataset.inputSpec,
                                   variables=networkConfig,
                                   printSummary=True)

        self.optimizer = GetOptimizer(self.HPs["Optimizer"],
                                      self.HPs["LearningRate"])
        self.mse = tf.keras.losses.MeanSquaredError()
Code Example #2
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Initializing Models for the method """

        self.HPs.update({
            "LearningRate": 0.0001,
            "LatentSize": 64,
            "Optimizer": "Adam",
            "Epochs": 10,
        })

        self.requiredParams.Append([
            "GenNetworkConfig",
            "DiscNetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        self.inputSpec = dataset.inputSpec
        networkConfig.update(dataset.outputSpec)
        self.Generator = CreateModel(self.HPs["GenNetworkConfig"],
                                     {"latent": self.HPs["LatentSize"]},
                                     variables=networkConfig,
                                     printSummary=True)
        self.Discriminator = CreateModel(self.HPs["DiscNetworkConfig"],
                                         dataset.inputSpec,
                                         variables=networkConfig,
                                         printSummary=True)

        self.generatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                               self.HPs["LearningRate"])
        self.discriminatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                                   self.HPs["LearningRate"])
        self.crossEntropy = tf.keras.losses.BinaryCrossentropy(
            from_logits=False)
Code Example #3
File: Autoencoder.py Project: vanstrn/MV
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Initializing Model and all Hyperparameters """

        self.HPs.update({
            "LearningRate": 0.00005,
            "LatentSize": 64,
            "Optimizer": "Adam",
            "Epochs": 10,
            "BatchSize": 64,
        })

        self.requiredParams.Append([
            "NetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        self.inputSpec = dataset.inputSpec
        networkConfig.update(dataset.outputSpec)
        self.Model = CreateModel(self.HPs["NetworkConfig"],
                                 dataset.inputSpec,
                                 variables=networkConfig,
                                 printSummary=True)

        self.optimizer = GetOptimizer(self.HPs["Optimizer"],
                                      self.HPs["LearningRate"])
        self.mse = tf.keras.losses.MeanSquaredError()
Code Example #4
File: Classifier.py Project: vanstrn/MV
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Initializing Model and all Hyperparameters """

        self.HPs = {
            "LearningRate": 0.00005,
            "Optimizer": "Adam",
            "Epochs": 10,
            "BatchSize": 64,
            "Shuffle": True,
        }

        self.requiredParams = [
            "NetworkConfig",
        ]

        #Checking valid hyperparameters are specified
        CheckFilled(self.requiredParams, settingsDict["NetworkHPs"])
        self.HPs.update(settingsDict["NetworkHPs"])

        #Processing Other inputs
        self.opt = GetOptimizer(self.HPs["Optimizer"],
                                self.HPs["LearningRate"])
        networkConfig.update(dataset.outputSpec)
        self.Model = CreateModel(self.HPs["NetworkConfig"],
                                 dataset.inputSpec,
                                 variables=networkConfig)
        self.Model.compile(optimizer=self.opt,
                           loss=["mse"],
                           metrics=['accuracy'])
        self.Model.summary(print_fn=log.info)

        self.LoadModel({"modelPath": "models/Test"})
Code Example #5
File: Autoencoder.py Project: vanstrn/MV
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Autoencoder test method. This implementation runs about 5-10% slower than the base Autoencoding method."""

        self.HPs.update({
            "LearningRate": 0.00005,
            "Optimizer": "Adam",
            "Epochs": 10,
            "LatentSize": 64,
            "BatchSize": 64,
            "Shuffle": True,
        })

        self.requiredParams.Append([
            "NetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        self.opt = GetOptimizer(self.HPs["Optimizer"],
                                self.HPs["LearningRate"])
        networkConfig.update(dataset.outputSpec)
        networkConfig.update({"LatentSize": self.HPs["LatentSize"]})
        self.Model = CreateModel(self.HPs["NetworkConfig"],
                                 dataset.inputSpec,
                                 variables=networkConfig,
                                 printSummary=True)
        self.Model.compile(optimizer=self.opt, loss=["mse"], metrics=[])
Code Example #6
File: Autoencoder.py Project: vanstrn/MV
class Autoencoder(BaseMethod):
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Initializing Model and all Hyperparameters """

        self.HPs.update({
            "LearningRate": 0.00005,
            "LatentSize": 64,
            "Optimizer": "Adam",
            "Epochs": 10,
            "BatchSize": 64,
        })

        self.requiredParams.Append([
            "NetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        self.inputSpec = dataset.inputSpec
        networkConfig.update(dataset.outputSpec)
        self.Model = CreateModel(self.HPs["NetworkConfig"],
                                 dataset.inputSpec,
                                 variables=networkConfig,
                                 printSummary=True)

        self.optimizer = GetOptimizer(self.HPs["Optimizer"],
                                      self.HPs["LearningRate"])
        self.mse = tf.keras.losses.MeanSquaredError()

    @tf.function
    def TrainStep(self, images):

        with tf.GradientTape() as tape:
            generatedImages = self.Model(images, training=True)["Decoder"]

            loss = self.mse(images["image"], generatedImages)

        gradients = tape.gradient(loss, self.Model.trainable_variables)

        self.optimizer.apply_gradients(
            zip(gradients, self.Model.trainable_variables))

        return {"Autoencoder Loss": loss}

    def ImagesFromImage(self, testImages):
        return self.Model.predict({"image": testImages})["Decoder"]

    def LatentFromImage(self, testImages):
        return self.Model.predict({"image": testImages})["Latent"]

    def AnomalyScore(self, testImages):
        return tf.reduce_sum(
            (testImages - self.ImagesFromImage(testImages))**2,
            axis=list(range(1, len(testImages.shape))))
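
The AnomalyScore above reduces each sample to the sum of its squared reconstruction error over every non-batch axis. A minimal standalone sketch of the same idea, assuming only a Keras-style model that maps an image batch to a reconstruction of the same shape (the model and names here are illustrative, not part of the project):

import tensorflow as tf

def ReconstructionAnomalyScore(model, images):
    # One score per sample: sum squared reconstruction error over all
    # axes except the batch axis.
    recon = model(images, training=False)
    squaredError = (images - recon)**2
    return tf.reduce_sum(squaredError,
                         axis=list(range(1, len(images.shape))))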
Code Example #7
File: Autoencoder.py Project: vanstrn/MV
class Autoencoder_v2(BaseMethod):
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Autoencoder test method. This implementation runs about 5-10% slower than the base Autoencoding method."""

        self.HPs.update({
            "LearningRate": 0.00005,
            "Optimizer": "Adam",
            "Epochs": 10,
            "LatentSize": 64,
            "BatchSize": 64,
            "Shuffle": True,
        })

        self.requiredParams.Append([
            "NetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        self.opt = GetOptimizer(self.HPs["Optimizer"],
                                self.HPs["LearningRate"])
        networkConfig.update(dataset.outputSpec)
        networkConfig.update({"LatentSize": self.HPs["LatentSize"]})
        self.Model = CreateModel(self.HPs["NetworkConfig"],
                                 dataset.inputSpec,
                                 variables=networkConfig,
                                 printSummary=True)
        self.Model.compile(optimizer=self.opt, loss=["mse"], metrics=[])

    def Train(self, data, callbacks=[]):
        self.InitializeCallbacks(callbacks)
        self.Model.fit(data["image"],
                       data["image"],
                       epochs=self.HPs["Epochs"],
                       batch_size=self.HPs["BatchSize"],
                       shuffle=self.HPs["Shuffle"],
                       callbacks=self.callbacks)
        self.SaveModel("models/TestAE")

    def ImagesFromImage(self, testImages):
        return self.Model.predict({"image": testImages})["Decoder"]

    def AnomalyScore(self, testImages):
        return tf.reduce_sum(
            (testImages - self.ImagesFromImage(testImages))**2,
            axis=list(range(1, len(testImages.shape))))
Code Example #8
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Initializing Model and all Hyperparameters """

        self.HPs.update({
            "LearningRate": 0.00005,
            "LatentSize": 64,
            "Optimizer": "Adam",
            "Epochs": 10,
            "BatchSize": 32,
            "Shuffle": True,
        })

        self.requiredParams.Append([
            "GenNetworkConfig",
            "DiscNetworkConfig",
            "EncNetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        self.inputSpec = dataset.inputSpec
        self.opt = GetOptimizer(self.HPs["Optimizer"],
                                self.HPs["LearningRate"])
        networkConfig.update(dataset.outputSpec)
        networkConfig.update({"LatentSize": self.HPs["LatentSize"]})
        self.Generator = CreateModel(self.HPs["GenNetworkConfig"],
                                     {"latent": self.HPs["LatentSize"]},
                                     variables=networkConfig,
                                     printSummary=True)
        self.Encoder = CreateModel(self.HPs["EncNetworkConfig"],
                                   dataset.inputSpec,
                                   variables=networkConfig,
                                   printSummary=True)
        _datasetSpec = {
            "features": [self.HPs["LatentSize"]],
            **dataset.inputSpec
        }
        self.Discriminator = CreateModel(self.HPs["DiscNetworkConfig"],
                                         _datasetSpec,
                                         variables=networkConfig,
                                         printSummary=True)

        self.generatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                               self.HPs["LearningRate"])
        self.discriminatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                                   self.HPs["LearningRate"])
        self.crossEntropy = tf.keras.losses.BinaryCrossentropy(
            from_logits=True)
Code Example #9
File: WGAN.py Project: vanstrn/MV
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Initializing Model and all Hyperparameters """

        self.HPs.update({
            "LearningRate": 0.00005,
            "LatentSize": 64,
            "Optimizer": "RMS",
            "Epochs": 10,
            "DiscrimClipValue": 0.01,
            "GenUpdateFreq": 5,
        })

        self.requiredParams.Append([
            "GenNetworkConfig",
            "DiscNetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        self.inputSpec = dataset.inputSpec
        networkConfig.update(dataset.outputSpec)
        self.Generator = CreateModel(self.HPs["GenNetworkConfig"],
                                     {"latent": self.HPs["LatentSize"]},
                                     variables=networkConfig,
                                     printSummary=True)
        self.Discriminator = CreateModel(self.HPs["DiscNetworkConfig"],
                                         dataset.inputSpec,
                                         variables=networkConfig,
                                         printSummary=True)

        self.generatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                               self.HPs["LearningRate"])
        self.discriminatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                                   self.HPs["LearningRate"])

        self.counter = 0
Code Example #10
    def GenerateLatent(self, sample):
        #Building the latent network
        LatentPredNet = CreateModel(
            self.HPs["LatentNetworkConfig"],
            self.inputSpec,
            variables={"LatentSize": self.HPs["LatentSize"]})
        latentOptimizer = tf.keras.optimizers.Adam(1e-4)
        #Training the latent network
        for _ in range(self.HPs["AnomalyFitEpochs"]):
            with tf.GradientTape() as tape:
                z = LatentPredNet(sample)["Latent"]
                out = self.Generator(z)["Decoder"]
                # Elementwise L1 error; tape.gradient treats the non-scalar
                # target as if summed into a single objective.
                latentLoss = tf.math.abs(out - sample)
            latentGradients = tape.gradient(latentLoss,
                                            LatentPredNet.trainable_variables)
            latentOptimizer.apply_gradients(
                zip(latentGradients, LatentPredNet.trainable_variables))

        return LatentPredNet(sample)["Latent"]
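
GenerateLatent fits a small prediction network to recover a latent code for a given sample. For comparison, a minimal sketch of the common alternative of optimizing a free latent variable directly; the generator callable and all names here are assumptions for illustration:

import tensorflow as tf

def FindLatentDirect(generator, sample, latentSize, steps=100, lr=1e-2):
    # Optimize a free latent variable until the generator reproduces `sample`.
    z = tf.Variable(tf.random.normal([sample.shape[0], latentSize]))
    opt = tf.keras.optimizers.Adam(lr)
    for _ in range(steps):
        with tf.GradientTape() as tape:
            recon = generator(z)
            loss = tf.reduce_mean(tf.math.abs(recon - sample))
        grads = tape.gradient(loss, [z])
        opt.apply_gradients(zip(grads, [z]))
    return z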
Code Example #11
File: GANomaly.py Project: vanstrn/MV
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Initializing Model and all Hyperparameters """

        self.HPs.update({
            "LearningRate": 0.00005,
            "LatentSize": 16,
            "Optimizer": "Adam",
            "Epochs": 10,
            "w_adv": 1,
            "w_con": 50,
            "w_enc": 1,
        })

        self.requiredParams.Append([
            "GenNetworkConfig",
            "EncNetworkConfig",
            "DiscNetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        networkConfig.update(dataset.outputSpec)
        networkConfig.update({"LatentSize": self.HPs["LatentSize"]})
        self.Generator = CreateModel(self.HPs["GenNetworkConfig"],
                                     {"latent": self.HPs["LatentSize"]},
                                     variables=networkConfig,
                                     printSummary=True)
        self.Discriminator = CreateModel(self.HPs["DiscNetworkConfig"],
                                         dataset.inputSpec,
                                         variables=networkConfig,
                                         printSummary=True)
        self.Encoder = CreateModel(self.HPs["EncNetworkConfig"],
                                   dataset.inputSpec,
                                   variables=networkConfig,
                                   printSummary=True)
        self.Encoder2 = CreateModel(self.HPs["EncNetworkConfig"],
                                    dataset.inputSpec,
                                    variables=networkConfig)

        self.generatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                               self.HPs["LearningRate"])
        self.discriminatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                                   self.HPs["LearningRate"])
        self.encoderOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                             self.HPs["LearningRate"])
        self.crossEntropy = tf.keras.losses.BinaryCrossentropy(
            from_logits=False)
Code Example #12
class GAN(BaseMethod):
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Initializing Models for the method """

        self.HPs.update({
            "LearningRate": 0.0001,
            "LatentSize": 64,
            "Optimizer": "Adam",
            "Epochs": 10,
        })

        self.requiredParams.Append([
            "GenNetworkConfig",
            "DiscNetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        self.inputSpec = dataset.inputSpec
        networkConfig.update(dataset.outputSpec)
        self.Generator = CreateModel(self.HPs["GenNetworkConfig"],
                                     {"latent": self.HPs["LatentSize"]},
                                     variables=networkConfig,
                                     printSummary=True)
        self.Discriminator = CreateModel(self.HPs["DiscNetworkConfig"],
                                         dataset.inputSpec,
                                         variables=networkConfig,
                                         printSummary=True)

        self.generatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                               self.HPs["LearningRate"])
        self.discriminatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                                   self.HPs["LearningRate"])
        self.crossEntropy = tf.keras.losses.BinaryCrossentropy(
            from_logits=False)

    @tf.function
    def TrainStep(self, images):
        randomLatent = tf.random.normal(
            [self.HPs["BatchSize"], self.HPs["LatentSize"]])

        with tf.GradientTape() as genTape, tf.GradientTape() as discTape:
            generatedImages = self.Generator(randomLatent,
                                             training=True)["Decoder"]

            realPred = self.Discriminator(images, training=True)["Discrim"]
            fakePred = self.Discriminator({"image": generatedImages},
                                          training=True)["Discrim"]

            genLoss = self.crossEntropy(tf.ones_like(fakePred), fakePred)
            discLoss = self.crossEntropy(tf.ones_like(realPred), realPred) + \
                        self.crossEntropy(tf.zeros_like(fakePred), fakePred)

        generatorGradients = genTape.gradient(
            genLoss, self.Generator.trainable_variables)
        discriminatorGradients = discTape.gradient(
            discLoss, self.Discriminator.trainable_variables)

        self.generatorOptimizer.apply_gradients(
            zip(generatorGradients, self.Generator.trainable_variables))
        self.discriminatorOptimizer.apply_gradients(
            zip(discriminatorGradients,
                self.Discriminator.trainable_variables))

        return {"Generator Loss": genLoss, "Discriminator Loss": discLoss}

    def ImagesFromLatent(self, sample):
        return self.Generator.predict(sample)["Decoder"]
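
For reference, the two objectives in TrainStep restated as standalone functions: the generator is rewarded when fakes are scored as real, and the discriminator when reals score one and fakes score zero. A sketch; only the loss math is taken from the example above:

import tensorflow as tf

bce = tf.keras.losses.BinaryCrossentropy(from_logits=False)

def GeneratorLoss(fakePred):
    # Non-saturating generator loss: push fake predictions toward 1.
    return bce(tf.ones_like(fakePred), fakePred)

def DiscriminatorLoss(realPred, fakePred):
    # Reals toward 1, fakes toward 0.
    return bce(tf.ones_like(realPred), realPred) + \
           bce(tf.zeros_like(fakePred), fakePred)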
Code Example #13
File: Classifier.py Project: vanstrn/MV
class Classifier(BaseMethod):
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Initializing Model and all Hyperparameters """

        self.HPs = {
            "LearningRate": 0.00005,
            "Optimizer": "Adam",
            "Epochs": 10,
            "BatchSize": 64,
            "Shuffle": True,
        }

        self.requiredParams = [
            "NetworkConfig",
        ]

        #Checking valid hyperparameters are specified
        CheckFilled(self.requiredParams, settingsDict["NetworkHPs"])
        self.HPs.update(settingsDict["NetworkHPs"])

        #Processing Other inputs
        self.opt = GetOptimizer(self.HPs["Optimizer"],
                                self.HPs["LearningRate"])
        networkConfig.update(dataset.outputSpec)
        self.Model = CreateModel(self.HPs["NetworkConfig"],
                                 dataset.inputSpec,
                                 variables=networkConfig)
        self.Model.compile(optimizer=self.opt,
                           loss=["mse"],
                           metrics=['accuracy'])
        self.Model.summary(print_fn=log.info)

        self.LoadModel({"modelPath": "models/Test"})

    def Train(self, data, callbacks=[]):

        self.Model.fit(data["x_train"],
                       data["y_train"],
                       epochs=self.HPs["Epochs"],
                       batch_size=self.HPs["BatchSize"],
                       shuffle=self.HPs["Shuffle"],
                       callbacks=callbacks)
        self.SaveModel("models/Test")

    def Test(self, data):
        count = 0
        for i in range(0, data["x_test"].shape[0], self.HPs["BatchSize"]):
            if i + self.HPs["BatchSize"] > data["x_test"].shape[0]:
                pred = self.Model(np.expand_dims(data["x_test"][i:, :, :], -1))
                realFinal = tf.math.argmax(data["y_test"][i:, :], axis=-1)
            else:
                pred = self.Model(
                    np.expand_dims(
                        data["x_test"][i:i + self.HPs["BatchSize"], :, :], -1))
                realFinal = tf.math.argmax(
                    data["y_test"][i:i + self.HPs["BatchSize"], :], axis=-1)
            predFinal = tf.math.argmax(pred['Classifier'], axis=-1)
            count += tf.reduce_sum(
                tf.cast(tf.math.equal(realFinal, predFinal), dtype=tf.float32))
        print(count, data["x_test"].shape[0])
        print(count / data["x_test"].shape[0])
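
When the test set fits in memory, the batched loop in Test can be cross-checked with a single vectorized pass. A sketch assuming one-hot labels and the "Classifier" output head used above:

import numpy as np
import tensorflow as tf

def Accuracy(model, data):
    # One predict pass over the whole test set (illustrative helper).
    pred = model.predict(np.expand_dims(data["x_test"], -1))
    predLabels = tf.math.argmax(pred["Classifier"], axis=-1)
    realLabels = tf.math.argmax(data["y_test"], axis=-1)
    return tf.reduce_mean(
        tf.cast(tf.math.equal(predLabels, realLabels), tf.float32))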
Code Example #14
File: GANomaly.py Project: vanstrn/MV
class GANomaly(BaseMethod):
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Initializing Model and all Hyperparameters """

        self.HPs.update({
            "LearningRate": 0.00005,
            "LatentSize": 16,
            "Optimizer": "Adam",
            "Epochs": 10,
            "w_adv": 1,
            "w_con": 50,
            "w_enc": 1,
        })

        self.requiredParams.Append([
            "GenNetworkConfig",
            "EncNetworkConfig",
            "DiscNetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        networkConfig.update(dataset.outputSpec)
        networkConfig.update({"LatentSize": self.HPs["LatentSize"]})
        self.Generator = CreateModel(self.HPs["GenNetworkConfig"],
                                     {"latent": self.HPs["LatentSize"]},
                                     variables=networkConfig,
                                     printSummary=True)
        self.Discriminator = CreateModel(self.HPs["DiscNetworkConfig"],
                                         dataset.inputSpec,
                                         variables=networkConfig,
                                         printSummary=True)
        self.Encoder = CreateModel(self.HPs["EncNetworkConfig"],
                                   dataset.inputSpec,
                                   variables=networkConfig,
                                   printSummary=True)
        self.Encoder2 = CreateModel(self.HPs["EncNetworkConfig"],
                                    dataset.inputSpec,
                                    variables=networkConfig)

        self.generatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                               self.HPs["LearningRate"])
        self.discriminatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                                   self.HPs["LearningRate"])
        self.encoderOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                             self.HPs["LearningRate"])
        self.crossEntropy = tf.keras.losses.BinaryCrossentropy(
            from_logits=False)

    def Train(self, data, callbacks=[]):
        self.InitializeCallbacks(callbacks)
        trainDataset = self.SetupDataset(data)
        for epoch in range(self.HPs["Epochs"]):
            ts = time.time()

            for batch in trainDataset:
                info1 = {}  # stays empty; the TrainStepGAN pass defined below is not called here
                info2 = self.TrainStepXZX(batch)
            self.ExecuteEpochEndCallbacks(epoch, {**info1, **info2})
            log.info("End Epoch {}: Time {}".format(epoch, time.time() - ts))
        self.ExecuteTrainEndCallbacks({})

    @tf.function
    def TrainStepGAN(self, images):
        randomLatent = tf.random.normal(
            [self.HPs["BatchSize"], self.HPs["LatentSize"]])

        with tf.GradientTape() as genTape, tf.GradientTape() as discTape:
            generatedImages = self.Generator(randomLatent,
                                             training=True)["Decoder"]

            realOutput = self.Discriminator(images, training=True)
            fakeOutput = self.Discriminator({"image": generatedImages},
                                            training=True)
            realPred = realOutput["Discrim"]
            realFeatures = realOutput["Features"]
            fakePred = fakeOutput["Discrim"]
            fakeFeatures = fakeOutput["Features"]

            genLoss = self.crossEntropy(tf.ones_like(fakePred), fakePred)
            featLoss = tf.reduce_mean(realFeatures - fakeFeatures)**2.0
            discLoss = self.crossEntropy(tf.ones_like(realPred), realPred) + \
                        self.crossEntropy(tf.zeros_like(fakePred), fakePred)
            genAllLoss = genLoss  # + featLoss (feature-matching term left disabled)

        generatorGradients = genTape.gradient(
            genAllLoss, self.Generator.trainable_variables)
        discriminatorGradients = discTape.gradient(
            discLoss, self.Discriminator.trainable_variables)

        self.generatorOptimizer.apply_gradients(
            zip(generatorGradients, self.Generator.trainable_variables))
        self.discriminatorOptimizer.apply_gradients(
            zip(discriminatorGradients,
                self.Discriminator.trainable_variables))

        return {"Generator Loss": genLoss, "Discriminator Loss": discLoss}

    # @tf.function
    def TrainStepXZX(self, images):

        with tf.GradientTape() as genTape, tf.GradientTape(
        ) as encTape, tf.GradientTape() as discTape:
            z = self.Encoder(images, training=True)["Latent"]
            x_hat = self.Generator(z, training=True)["Decoder"]
            z_hat = self.Encoder2(x_hat, training=True)["Latent"]

            realOutput = self.Discriminator(images, training=True)
            fakeOutput = self.Discriminator(x_hat, training=True)
            realPred = realOutput["Discrim"]
            realFeatures = realOutput["Features"]
            fakePred = fakeOutput["Discrim"]
            fakeFeatures = fakeOutput["Features"]

            genLoss = self.crossEntropy(tf.ones_like(fakePred), fakePred)
            featLoss = tf.reduce_mean(realFeatures - fakeFeatures)**2.0
            discLoss = self.crossEntropy(tf.ones_like(realPred), realPred) + \
                        self.crossEntropy(tf.zeros_like(fakePred), fakePred)

            encoderLoss = tf.reduce_mean((z_hat - z)**2)
            contextLoss = tf.reduce_mean(tf.math.abs(x_hat - images["image"]))
            # NOTE: the w_adv/w_con/w_enc weights declared in HPs are not applied here.
            totalLoss = encoderLoss + contextLoss + genLoss + featLoss

        generatorGradients = genTape.gradient(
            totalLoss, self.Generator.trainable_variables)
        discriminatorGradients = discTape.gradient(
            totalLoss, self.Discriminator.trainable_variables)
        encoderGradients = encTape.gradient(
            totalLoss, self.Encoder.trainable_variables +
            self.Encoder2.trainable_variables)

        self.generatorOptimizer.apply_gradients(
            zip(generatorGradients, self.Generator.trainable_variables))
        self.encoderOptimizer.apply_gradients(
            zip(
                encoderGradients, self.Encoder.trainable_variables +
                self.Encoder2.trainable_variables))
        self.discriminatorOptimizer.apply_gradients(
            zip(discriminatorGradients,
                self.Discriminator.trainable_variables))

        return {
            "Generator Loss": genLoss,
            "Discriminator Loss": discLoss,
            "Feature Loss": featLoss,
            "Encoding Loss": encoderLoss,
            "Construction Loss": contextLoss
        }

    def InitializeCallbacks(self, callbacks):
        """Method initializes callbacks for training loops that are not `model.fit()`.
        Pass any params that the callbacks need into the generation of the callback list.

        For methods with multiple networks, pass them is as dictionaries.
        This requires callbacks that are compatible with the dictionary style of model usage.
        This style is compatible with the `method.fit()` method b/c code nests the inputed model variable without performing checks.
        Future it might be desirable to create a custom model nesting logic that will allow callbacks like `ModelCheckpoint` to be compatible.
        """
        self.callbacks = tf.keras.callbacks.CallbackList(
            callbacks, model=self, LatentSize=self.HPs["LatentSize"])

    def LatentFromImage(self, sample):
        return self.Encoder.predict(sample)["Latent"]

    def ImagesFromLatent(self, sample):
        return self.Generator.predict(sample)["Decoder"]

    def ImagesFromImage(self, testImages):
        z = self.Encoder.predict({"image": testImages})["Latent"]
        return self.Generator.predict({"latent": z})["Decoder"]

    def AnomalyScore(self, testImages):
        return tf.reduce_sum(
            (testImages - self.ImagesFromImage(testImages))**2,
            axis=list(range(1, len(testImages.shape))))
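
Note that HPs declares the GANomaly loss weights w_adv, w_con, and w_enc, but TrainStepXZX sums its terms unweighted. A sketch of how those weights would combine the objectives per the GANomaly formulation; the helper name is hypothetical:

def GanomalyTotalLoss(advLoss, conLoss, encLoss,
                      w_adv=1.0, w_con=50.0, w_enc=1.0):
    # Weighted sum of adversarial, contextual, and encoding objectives.
    return w_adv * advLoss + w_con * conLoss + w_enc * encLoss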
Code Example #15
class BiGAN(BaseMethod):
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Initializing Model and all Hyperparameters """

        self.HPs.update({
            "LearningRate": 0.00005,
            "LatentSize": 64,
            "Optimizer": "Adam",
            "Epochs": 10,
            "BatchSize": 32,
            "Shuffle": True,
        })

        self.requiredParams.Append([
            "GenNetworkConfig",
            "DiscNetworkConfig",
            "EncNetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        self.inputSpec = dataset.inputSpec
        self.opt = GetOptimizer(self.HPs["Optimizer"],
                                self.HPs["LearningRate"])
        networkConfig.update(dataset.outputSpec)
        networkConfig.update({"LatentSize": self.HPs["LatentSize"]})
        self.Generator = CreateModel(self.HPs["GenNetworkConfig"],
                                     {"latent": self.HPs["LatentSize"]},
                                     variables=networkConfig,
                                     printSummary=True)
        self.Encoder = CreateModel(self.HPs["EncNetworkConfig"],
                                   dataset.inputSpec,
                                   variables=networkConfig,
                                   printSummary=True)
        _datasetSpec = {
            "features": [self.HPs["LatentSize"]],
            **dataset.inputSpec
        }
        self.Discriminator = CreateModel(self.HPs["DiscNetworkConfig"],
                                         _datasetSpec,
                                         variables=networkConfig,
                                         printSummary=True)

        self.generatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                               self.HPs["LearningRate"])
        self.discriminatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                                   self.HPs["LearningRate"])
        self.crossEntropy = tf.keras.losses.BinaryCrossentropy(
            from_logits=True)

    @tf.function
    def TrainStep(self, images):
        randomLatent = tf.random.normal(
            [self.HPs["BatchSize"], self.HPs["LatentSize"]])

        with tf.GradientTape() as genTape, tf.GradientTape() as discTape:
            generatedImages = self.Generator(randomLatent,
                                             training=True)["Decoder"]

            e_z = self.Encoder(images)["Latent"]
            realPred = self.Discriminator({**images, "features": e_z},
                                          training=True)["Discrim"]
            fakePred = self.Discriminator(
                {"image": generatedImages, "features": randomLatent},
                training=True)["Discrim"]

            discLoss = self.crossEntropy(tf.ones_like(realPred), realPred) + \
                        self.crossEntropy(tf.zeros_like(fakePred), fakePred)
            genLoss = self.crossEntropy(tf.ones_like(fakePred), fakePred)
            encLoss = self.crossEntropy(tf.zeros_like(realPred), realPred)
            genAllLoss = genLoss + encLoss

        generatorGradients = genTape.gradient(
            genAllLoss, self.Generator.trainable_variables +
            self.Encoder.trainable_variables)
        discriminatorGradients = discTape.gradient(
            discLoss, self.Discriminator.trainable_variables)

        self.generatorOptimizer.apply_gradients(
            zip(
                generatorGradients, self.Generator.trainable_variables +
                self.Encoder.trainable_variables))
        self.discriminatorOptimizer.apply_gradients(
            zip(discriminatorGradients,
                self.Discriminator.trainable_variables))

        return {
            "Generator Loss": genLoss,
            "Discriminator Loss": discLoss,
            "Encoder Loss": encLoss
        }

    def ImagesFromLatent(self, sample):
        return self.Generator.predict(sample)["Decoder"]

    def LatentFromImage(self, sample):
        return self.Encoder.predict(sample)["Latent"]

    def ImagesFromImage(self, testImages):
        z = self.Encoder.predict({"image": testImages})["Latent"]
        return self.Generator.predict({"latent": z})["Decoder"]

    def ImageDiscrim(self, testImages):
        z = self.Encoder.predict({"image": testImages})["Latent"]
        return self.Discriminator.predict({"image": testImages,
                                           "features": z})["Discrim"]

    def AnomalyScore(self, testImages, alpha=0.9):
        v1 = tf.reduce_sum((testImages - self.ImagesFromImage(testImages))**2,
                           axis=list(range(1, len(testImages.shape))))
        v2 = tf.squeeze(self.ImageDiscrim(testImages))
        return alpha * v1 + (1 - alpha) * v2
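
AnomalyScore blends reconstruction error and the discriminator output as alpha * v1 + (1 - alpha) * v2. As a worked instance with the default alpha = 0.9: v1 = 4.0 and v2 = 0.5 give 0.9 * 4.0 + 0.1 * 0.5 = 3.65, so the reconstruction term dominates the score.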
Code Example #16
File: Autoencoder.py Project: vanstrn/MV
class Autoencoder_v3(BaseMethod):
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Autoencoder test method. This implementation runs about 4-8% slower than the base Autoencoding method."""

        self.HPs.update({
            "LearningRate": 0.00005,
            "LatentSize": 64,
            "Optimizer": "Adam",
            "Epochs": 10,
            "BatchSize": 64,
        })

        self.requiredParams.Append([
            "GenNetworkConfig",
            "EncNetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        self.inputSpec = dataset.inputSpec
        networkConfig.update(dataset.outputSpec)
        networkConfig.update({"LatentSize": self.HPs["LatentSize"]})
        self.Generator = CreateModel(self.HPs["GenNetworkConfig"],
                                     {"latent": self.HPs["LatentSize"]},
                                     variables=networkConfig,
                                     printSummary=True)
        self.Encoder = CreateModel(self.HPs["EncNetworkConfig"],
                                   dataset.inputSpec,
                                   variables=networkConfig,
                                   printSummary=True)

        self.optimizer = GetOptimizer(self.HPs["Optimizer"],
                                      self.HPs["LearningRate"])
        self.mse = tf.keras.losses.MeanSquaredError()

    @tf.function
    def TrainStep(self, images):

        with tf.GradientTape() as tape:
            latent = self.Encoder(images, training=True)["Latent"]
            generatedImages = self.Generator(latent, training=True)["Decoder"]

            loss = self.mse(images["image"], generatedImages)

        gradients = tape.gradient(
            loss, self.Generator.trainable_variables +
            self.Encoder.trainable_variables)

        self.optimizer.apply_gradients(
            zip(
                gradients, self.Generator.trainable_variables +
                self.Encoder.trainable_variables))

        return {"Autoencoder Loss": loss}

    def ImagesFromLatent(self, sample):
        return self.Generator.predict(sample)["Decoder"]

    def ImagesFromImage(self, testImages):
        latent = self.Encoder.predict({"image": testImages})["Latent"]
        return self.Generator.predict({"latent": latent})["Decoder"]

    def AnomalyScore(self, testImages):
        return tf.reduce_sum(
            (testImages - self.ImagesFromImage(testImages))**2,
            axis=list(range(1, len(testImages.shape))))

    def LatentFromImage(self, sample):
        return self.Encoder.predict(sample)["Latent"]
Code Example #17
File: WGAN.py Project: vanstrn/MV
class WGAN(BaseMethod):
    def __init__(self, settingsDict, dataset, networkConfig={}):
        """Initializing Model and all Hyperparameters """

        self.HPs.update({
            "LearningRate": 0.00005,
            "LatentSize": 64,
            "Optimizer": "RMS",
            "Epochs": 10,
            "DiscrimClipValue": 0.01,
            "GenUpdateFreq": 5,
        })

        self.requiredParams.Append([
            "GenNetworkConfig",
            "DiscNetworkConfig",
        ])

        super().__init__(settingsDict)

        #Processing Other inputs
        self.inputSpec = dataset.inputSpec
        networkConfig.update(dataset.outputSpec)
        self.Generator = CreateModel(self.HPs["GenNetworkConfig"],
                                     {"latent": self.HPs["LatentSize"]},
                                     variables=networkConfig,
                                     printSummary=True)
        self.Discriminator = CreateModel(self.HPs["DiscNetworkConfig"],
                                         dataset.inputSpec,
                                         variables=networkConfig,
                                         printSummary=True)

        self.generatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                               self.HPs["LearningRate"])
        self.discriminatorOptimizer = GetOptimizer(self.HPs["Optimizer"],
                                                   self.HPs["LearningRate"])

        self.counter = 0

    def Train(self, data, callbacks=[]):
        self.InitializeCallbacks(callbacks)
        trainDataset = self.SetupDataset(data)
        for epoch in range(self.HPs["Epochs"]):
            ts = time.time()

            for batch in trainDataset:
                trainGen = (self.counter % self.HPs["GenUpdateFreq"] == 0)
                info = self.TrainStep(batch, trainGen)
                self.ClipDiscriminator()
                self.counter += 1
            self.ExecuteEpochEndCallbacks(epoch, info)
            log.info("End Epoch {}: Time {}".format(epoch, time.time() - ts))
        self.ExecuteTrainEndCallbacks({})

    @tf.function
    def TrainStep(self, images, trainGen=True):
        randomLatent = tf.random.normal(
            [self.HPs["BatchSize"], self.HPs["LatentSize"]])

        with tf.GradientTape() as genTape, tf.GradientTape() as discTape:
            generatedImages = self.Generator(randomLatent,
                                             training=True)["Decoder"]

            realPred = self.Discriminator(images, training=True)["Discrim"]
            fakePred = self.Discriminator({"image": generatedImages},
                                          training=True)["Discrim"]

            genLoss = -tf.reduce_mean(fakePred)  # mean, matching the scale of discLoss below
            discLoss = -tf.reduce_mean(realPred) + tf.reduce_mean(fakePred)

        if trainGen:
            generatorGradients = genTape.gradient(
                genLoss, self.Generator.trainable_variables)
            self.generatorOptimizer.apply_gradients(
                zip(generatorGradients, self.Generator.trainable_variables))

        discriminatorGradients = discTape.gradient(
            discLoss, self.Discriminator.trainable_variables)
        self.discriminatorOptimizer.apply_gradients(
            zip(discriminatorGradients,
                self.Discriminator.trainable_variables))

        return {"Generator Loss": genLoss, "Discriminator Loss": discLoss}

    @tf.function()
    def ClipDiscriminator(self):
        """Clips all discriminator parameters to [-DiscrimClipValue, DiscrimClipValue]."""
        for params in self.Discriminator.variables:
            params.assign(
                tf.clip_by_value(params, -self.HPs["DiscrimClipValue"],
                                 self.HPs["DiscrimClipValue"]))

    def ImagesFromLatent(self, sample):
        return self.Generator.predict(sample)["Decoder"]
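
ClipDiscriminator implements the weight clipping from the original WGAN formulation. For reference, a minimal sketch of the gradient-penalty alternative (WGAN-GP); the discriminator callable and the 4D image shape are assumptions here:

import tensorflow as tf

def GradientPenalty(discriminator, realImages, fakeImages):
    # Penalize deviation of the critic's gradient norm from 1 on random
    # interpolates between real and generated samples.
    eps = tf.random.uniform([tf.shape(realImages)[0], 1, 1, 1], 0.0, 1.0)
    interp = eps * realImages + (1.0 - eps) * fakeImages
    with tf.GradientTape() as tape:
        tape.watch(interp)
        pred = discriminator(interp)
    grads = tape.gradient(pred, interp)
    norms = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]) + 1e-12)
    return tf.reduce_mean((norms - 1.0)**2)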