class Autoencoder_v2(BaseMethod):
    def __init__(self, settingsDict, dataset, networkConfig=None):
        """Autoencoder test method.

        This implementation runs about 5-10% slower than the base
        Autoencoding method.

        Args:
            settingsDict: run configuration; merged into hyperparameters by
                the base class via ``super().__init__``.
            dataset: provides ``inputSpec``/``outputSpec`` used to build the
                model.
            networkConfig: optional dict of extra network variables. Defaults
                to a fresh dict (the original ``networkConfig={}`` was a
                shared mutable default, mutated by ``.update`` on every
                construction). Passing a dict explicitly still mutates it,
                as before.
        """
        # Fresh dict per call — fixes the mutable-default-argument bug.
        if networkConfig is None:
            networkConfig = {}

        # Default hyperparameters; overridden by settingsDict entries when
        # the base class processes them.
        self.HPs.update({
            "LearningRate": 0.00005,
            "Optimizer": "Adam",
            "Epochs": 10,
            "LatentSize": 64,
            "BatchSize": 64,
            "Shuffle": True,
        })
        # NOTE(review): `.Append` (capital A) implies requiredParams is a
        # project-specific container initialized by BaseMethod — confirm;
        # a plain list would need `.append`/`.extend`.
        self.requiredParams.Append([
            "NetworkConfig",
        ])
        super().__init__(settingsDict)

        # Processing other inputs: build optimizer and model from the
        # merged hyperparameters and dataset specs.
        self.opt = GetOptimizer(self.HPs["Optimizer"], self.HPs["LearningRate"])
        networkConfig.update(dataset.outputSpec)
        networkConfig.update({"LatentSize": self.HPs["LatentSize"]})
        self.Model = CreateModel(self.HPs["NetworkConfig"], dataset.inputSpec,
                                 variables=networkConfig, printSummary=True)
        self.Model.compile(optimizer=self.opt, loss=["mse"], metrics=[])

    def Train(self, data, callbacks=None):
        """Fit the autoencoder to reconstruct ``data["image"]``, then save.

        Args:
            data: dict with an "image" entry used as both input and target.
            callbacks: optional list of Keras callbacks. Defaults to a fresh
                list (fixes the shared ``callbacks=[]`` mutable default).
        """
        callbacks = [] if callbacks is None else callbacks
        self.InitializeCallbacks(callbacks)
        self.Model.fit(data["image"],
                       data["image"],
                       epochs=self.HPs["Epochs"],
                       batch_size=self.HPs["BatchSize"],
                       shuffle=self.HPs["Shuffle"],
                       callbacks=self.callbacks)
        self.SaveModel("models/TestAE")

    def ImagesFromImage(self, testImages):
        """Return the decoder's reconstruction of ``testImages``."""
        return self.Model.predict({"image": testImages})["Decoder"]

    def AnomalyScore(self, testImages):
        """Per-sample sum of squared reconstruction error.

        Reduces over every axis except the leading (batch) axis, so the
        result is one scalar score per input image.
        """
        return tf.reduce_sum(
            (testImages - self.ImagesFromImage(testImages))**2,
            axis=list(range(1, len(testImages.shape))))
class Classifier(BaseMethod):
    def __init__(self, settingsDict, dataset, networkConfig=None):
        """Initializing model and all hyperparameters.

        Args:
            settingsDict: must contain "NetworkHPs" with at least the
                required parameters; its entries override the defaults.
            dataset: provides ``inputSpec``/``outputSpec`` used to build the
                model.
            networkConfig: optional dict of extra network variables. Defaults
                to a fresh dict (the original ``networkConfig={}`` was a
                shared mutable default, mutated by ``.update`` on every
                construction). Passing a dict explicitly still mutates it,
                as before.
        """
        # Fresh dict per call — fixes the mutable-default-argument bug.
        if networkConfig is None:
            networkConfig = {}

        self.HPs = {
            "LearningRate": 0.00005,
            "Optimizer": "Adam",
            "Epochs": 10,
            "BatchSize": 64,
            "Shuffle": True,
        }
        self.requiredParams = [
            "NetworkConfig",
        ]
        # Checking valid hyperparameters are specified.
        CheckFilled(self.requiredParams, settingsDict["NetworkHPs"])
        self.HPs.update(settingsDict["NetworkHPs"])

        # Processing other inputs: build optimizer and model from the
        # merged hyperparameters and dataset specs.
        self.opt = GetOptimizer(self.HPs["Optimizer"], self.HPs["LearningRate"])
        networkConfig.update(dataset.outputSpec)
        self.Model = CreateModel(self.HPs["NetworkConfig"], dataset.inputSpec,
                                 variables=networkConfig)
        self.Model.compile(optimizer=self.opt, loss=["mse"],
                           metrics=['accuracy'])
        self.Model.summary(print_fn=log.info)
        self.LoadModel({"modelPath": "models/Test"})

    def Train(self, data, callbacks=None):
        """Fit the classifier on the training split, then save the model.

        Args:
            data: dict with "x_train" inputs and "y_train" targets.
            callbacks: optional list of Keras callbacks. Defaults to a fresh
                list (fixes the shared ``callbacks=[]`` mutable default).
        """
        callbacks = [] if callbacks is None else callbacks
        self.Model.fit(data["x_train"],
                       data["y_train"],
                       epochs=self.HPs["Epochs"],
                       batch_size=self.HPs["BatchSize"],
                       shuffle=self.HPs["Shuffle"],
                       callbacks=callbacks)
        self.SaveModel("models/Test")

    def Test(self, data):
        """Compute and print classification accuracy on the test split.

        Iterates the test set in batches, compares argmax predictions to
        argmax labels, and prints the correct count and the accuracy ratio.
        """
        nSamples = data["x_test"].shape[0]
        batchSize = self.HPs["BatchSize"]
        count = 0
        for i in range(0, nSamples, batchSize):
            # Slicing clamps past-the-end indices, so [i:i+batchSize]
            # already yields the short final batch — the original's
            # duplicated last-batch branch was redundant.
            batchX = np.expand_dims(
                data["x_test"][i:i + batchSize, :, :], -1)
            realFinal = tf.math.argmax(
                data["y_test"][i:i + batchSize, :], axis=-1)
            pred = self.Model(batchX)
            predFinal = tf.math.argmax(pred['Classifier'], axis=-1)
            count += tf.reduce_sum(
                tf.cast(tf.math.equal(realFinal, predFinal),
                        dtype=tf.float32))
        print(count, nSamples)
        print(count / nSamples)