def LoadFromDisk(self):
    """
    Loads the current data page (and, when available, the next one) from disk
    into the in-memory page buffer `self.Page`.

    Two consecutive buffer slots are filled per call: slot `CurrentPageStart`
    gets the page at `self.PageIndex`, slot `CurrentPageStart + 1` gets the
    page at `self.PageIndex + 1` (or None when past the last page).
    """
    # Advance the buffer base by 2 (a pair of slots) and wrap back to 0 once
    # it exceeds 2, so the base alternates between slot pairs (2,3) and (0,1)
    # — a double-buffering scheme over a 4-slot page buffer.
    # NOTE(review): reconstructed from mangled formatting — presumably
    # `self.Page` has 4 slots; verify against the class initializer.
    self.CurrentPageStart += 2
    if self.CurrentPageStart > 2:
        self.CurrentPageStart = 0

    # Load the page for the current index into the first slot of the pair.
    sFileName1 = Storage.JoinPath(
        self.DataFolder,
        MLDataIterator.FILENAME_TEMPLATE_PAGE % (self.PageNumbers[self.PageIndex]))
    oData1 = Storage.DeserializeObjectFromFile(sFileName1, p_bIsVerbose=False)
    self.Page[self.CurrentPageStart] = oData1
    # `__verboseLevel` is name-mangled against the enclosing class; accessed
    # through type(self) so subclasses see the defining class's setting.
    if type(self).__verboseLevel >= 2:
        print(" [>] Load MEM%d: %d" % (self.CurrentPageStart, self.PageNumbers[self.PageIndex]))

    # Prefetch the following page into the second slot of the pair, if any.
    if self.PageIndex + 1 < len(self.PageNumbers):
        sFileName2 = Storage.JoinPath(
            self.DataFolder,
            MLDataIterator.FILENAME_TEMPLATE_PAGE % (self.PageNumbers[self.PageIndex + 1]))
        oData2 = Storage.DeserializeObjectFromFile(sFileName2, p_bIsVerbose=False)
        if type(self).__verboseLevel >= 2:
            print(" [>] Load MEM%d: %d " % (self.CurrentPageStart + 1, self.PageNumbers[self.PageIndex + 1]))
    else:
        # No next page: mark the companion slot empty.
        oData2 = None
    self.Page[self.CurrentPageStart + 1] = oData2
def Load(self, p_sFileName):
    """
    Restores serialized evaluation results into this object's attributes.

    Parameters
        p_sFileName : path of the serialized evaluation results file.

    Raises
        AssertionError when the file does not exist / deserializes to None.
    """
    oData = Storage.DeserializeObjectFromFile(p_sFileName)
    assert oData is not None, "Evaluation results file not found %s" % p_sFileName

    # (attribute name, dictionary key) pairs that are always present.
    tRequired = (
        ("IDs", "IDs"),
        ("Kind", "Kind"),
        ("ActualClasses", "Actual"),
        ("PredictedClasses", "Predicted"),
        ("PredictedProbsTop", "PredictedProbsTop"),
        ("AveragePrecision", "AveragePrecision"),
        ("AverageRecall", "AverageRecall"),
        ("AverageF1Score", "AverageF1Score"),
        ("AverageSupport", "AverageSupport"),
        ("Precision", "ClassPrecision"),
        ("Recall", "ClassRecall"),
        ("F1Score", "ClassF1Score"),
        ("Support", "ClassSupport"),
        ("ConfusionMatrix", "ConfusionMatrix"),
    )
    for sAttr, sKey in tRequired:
        setattr(self, sAttr, oData[sKey])

    # Keys written only by newer serialization formats; keep the current
    # attribute value untouched when a key is absent from an older file.
    for sOptional in ("TopKappa", "Accuracy", "TopKAccuracy"):
        if sOptional in oData:
            setattr(self, sOptional, oData[sOptional])

    # Derived counts from the loaded arrays.
    if self.PredictedProbsTop is not None:
        self.TopCount = self.PredictedProbsTop.shape[1]
    self.ClassCount = self.Recall.shape[0]
def __readCounter(self):
    """
    Gets the current run/evaluation counter.

    Loads the counter structure from `self.CountersFileName`; when the file
    is missing, seeds a fresh structure and returns 1.
    """
    self.Counter = Storage.DeserializeObjectFromFile(self.CountersFileName)
    if self.Counter is None:
        # First run: no counters file on disk yet — start counting from 1.
        self.Counter = {"FormatVersion": "TALOS10", "RunCounter": 1}
        return 1
    return self.Counter["RunCounter"]
def __loadClassesFromDisk(self):
    """
    Loads the dataset's class metadata from the serialized classes file and
    distributes it across the Train/Validation/Testing subset objects.

    Returns
        True when the classes file exists and was loaded.

    Raises
        Exception when no classes file exists under the dataset folder.
        AssertionError when the class description count mismatches the codes.
    """
    bResult = Storage.IsExistingFile(self.DataSetFolder.ClassesFile)
    if not bResult:
        raise Exception("No dataset found under %s" % self.DataSetFolder.BaseFolder)

    oData = Storage.DeserializeObjectFromFile(self.DataSetFolder.ClassesFile)

    self.ClassCodes = oData["ClassCodes"]
    self.ClassDescr = oData["ClassDescr"]
    self.ClassCount = len(self.ClassCodes)
    assert len(self.ClassDescr) == self.ClassCount, \
        "incorrect count of class descriptions %d" % len(self.ClassDescr)

    # Per-subset fields share a key suffix in the serialized dictionary.
    tSubsets = ((self.Train, "Train"), (self.Validation, "Val"), (self.Testing, "Test"))
    for oSubset, sSuffix in tSubsets:
        oSubset.ClassFolders = oData["ClassFolders" + sSuffix]
        oSubset.ClassSamplesAvailable = oData["ClassSamplesAvailable" + sSuffix]
        oSubset.IsActive = oData["Has" + sSuffix]

    # Remaining fields map one-to-one between dictionary key and attribute.
    for sKey in ("CaltechClassDescr", "ImageNetClassID", "ImageNetClassCodes",
                 "ImageNetClassDescr", "TrainSamplesPerClass", "PageSize"):
        setattr(self, sKey, oData[sKey])

    self.Log.Print(" |__ Classes: %d" % self.ClassCount)
    return bResult
def Initialize(self, p_sCustomBaseFolder=None):
    """
    Resolves the experiments to compare, opens each experiment folder, and
    loads its run statistics into `self.Envs` / `self.Stats`.

    Parameters
        p_sCustomBaseFolder : optional base folder prepended to each
            experiment ERL string; when None, experiments are opened through
            their ERL using the configured learn config.

    Raises
        AssertionError when an experiment folder or its stats file is missing.
    """
    # Fall back to the settings when metrics / experiment list not given.
    # NOTE(review): reconstructed from mangled formatting — SerieLabels is
    # assumed to be set together with Metrics; confirm original nesting.
    if self.Metrics is None:
        self.Metrics = self.Settings.Metrics
        self.SerieLabels = self.Settings.Titles
    if self.ExperimentsToCompare is None:
        self.ExperimentsToCompare = self.Settings.ExperimentsToCompare

    # One slot per experiment plus one extra (purpose not visible here).
    self.Epochs = np.zeros(len(self.ExperimentsToCompare) + 1, np.int32)
    self.ModelTitles = []
    for nIndex, sExperimentERL in enumerate(self.ExperimentsToCompare):
        if p_sCustomBaseFolder is not None:
            # Here a subfolder is given and the custom base folder is prepended
            sExperimentFolder = Storage.JoinPath(p_sCustomBaseFolder, sExperimentERL)
            oExperiment = ExperimentFolder.GetExperiment(sExperimentFolder, p_sCustomBaseFolder)
            assert oExperiment is not None, "Experiment folder %s not found" % sExperimentFolder
            # Sets the config that is needed to return architecture and dataset for the learn comparison
            if self.Settings.Config is None:
                self.Settings.Config = oExperiment.LearnConfig
        else:
            # Open by ERL string using the configured learn config.
            oExperiment = ExperimentFolder(p_oLearnConfig=self.Settings.Config)
            oExperiment.OpenERL(p_sERLString=sExperimentERL)
        #nFoldNumber, sUID = ExperimentFolder.SplitExperimentCode(oExperimentCode)
        #oExperiment = ExperimentFolder(p_oLearnConfig=self.Settings.Config)
        #oExperiment.Open(nFoldNumber, sUID)
        dStats = Storage.DeserializeObjectFromFile(oExperiment.RunSub.StatsFileName)
        assert dStats is not None, "File not found %s" % oExperiment.RunSub.StatsFileName
        self.Envs.append(oExperiment)
        self.Stats.append(dStats)
        # Stored epoch number is 1-based; keep the last completed 0-based epoch.
        self.Epochs[nIndex] = dStats["EpochNumber"] - 1
        #nFoldNumber, sUID = ExperimentFolder.SplitExperimentCode(oExperiment.Code)
        self.ModelTitles.append(
            self.Settings.ExperimentDescriptions[nIndex] + " (%s)" % oExperiment.ERL.ExperimentUID)
def Load(self, p_sFileName):
    """
    Restores serialized best-epoch scores and metrics into this object.

    Parameters
        p_sFileName : path of the serialized scores file.

    Raises
        AssertionError when the file does not exist / deserializes to None.
    """
    oData = Storage.DeserializeObjectFromFile(p_sFileName, p_bIsVerbose=False)
    assert oData is not None, "File %s not found" % p_sFileName

    # Every attribute name matches its dictionary key one-to-one.
    tAlwaysPresent = (
        "BestEpochs", "IsBinary", "EpochNumber", "FileNames",
        "Accuracy", "Recall", "Precision", "F1Score", "CrossF1Score",
        "PositiveF1Score",
        "BestPoints", "BestRecall", "BestPrecision", "BestF1Score",
        "BestCrossF1Score", "BestPositiveF1Score",
        "DiscardedEpochs", "BestRecallEpochs", "BestPrecisionEpochs",
        "BestF1ScoreEpochs", "BestCrossF1ScoreEpochs",
        "BestPositiveScoreEpochs",
    )
    for sKey in tAlwaysPresent:
        setattr(self, sKey, oData[sKey])

    # Objective-F1 fields exist only in newer files; keep current attribute
    # values untouched when a key is absent.
    for sKey in ("ObjectiveF1Score", "BestObjectiveF1Score", "BestObjectiveF1ScoreEpochs"):
        if sKey in oData:
            setattr(self, sKey, oData[sKey])