def LoadFromDisk(self):
    """Loads the current data page from disk into an in-memory page slot and
    prefetches the following page (when one exists) into the adjacent slot.
    """
    # The slot start index alternates between 0 and 2 (wraps after exceeding 2),
    # giving a double-buffered pair of page slots.
    self.CurrentPageStart += 2
    if self.CurrentPageStart > 2:
        self.CurrentPageStart = 0
    # Load the page for the current page index into the first slot of the pair.
    sFileName1 = Storage.JoinPath(
        self.DataFolder,
        MLDataIterator.FILENAME_TEMPLATE_PAGE % (self.PageNumbers[self.PageIndex]))
    oData1 = Storage.DeserializeObjectFromFile(sFileName1, p_bIsVerbose=False)
    self.Page[self.CurrentPageStart] = oData1
    if type(self).__verboseLevel >= 2:
        print(" [>] Load MEM%d: %d" % (self.CurrentPageStart, self.PageNumbers[self.PageIndex]))
    # Prefetch the next page into the second slot; None marks "no next page".
    if self.PageIndex + 1 < len(self.PageNumbers):
        sFileName2 = Storage.JoinPath(
            self.DataFolder,
            MLDataIterator.FILENAME_TEMPLATE_PAGE % (self.PageNumbers[self.PageIndex + 1]))
        oData2 = Storage.DeserializeObjectFromFile(sFileName2, p_bIsVerbose=False)
        if type(self).__verboseLevel >= 2:
            print(" [>] Load MEM%d: %d " % (self.CurrentPageStart + 1, self.PageNumbers[self.PageIndex + 1]))
    else:
        oData2 = None
    self.Page[self.CurrentPageStart + 1] = oData2
def __determineInitialModelUID(self):
    """Returns the 12-character UID encoded at the end of the most recently
    saved "initial-model_*" file name in the common architecture folder,
    or None when no such file exists.
    """
    # Walk the sorted listing backwards so the last saved initial model wins,
    # matching the "last match" semantics of a forward scan without break.
    for sCandidate in reversed(Storage.GetFilesSorted(self.RunSub.ArchitectureCommonFolder)):
        if sCandidate.startswith("initial-model_"):
            _, sBaseName, _ = Storage.SplitFileName(sCandidate)
            # A standard fold number 1 and the last saved initial experiment in the common folder will be returned
            return sBaseName[-12:]
    return None
def ListCompressedModels(self):
    """Returns the full paths of all compressed model archives found in the
    experiment model folder (sorted by name); [] when the folder is missing.
    """
    if not Storage.IsExistingPath(self.ExperimentModelFolder):
        return []
    return [
        Storage.JoinPath(self.ExperimentModelFolder, sArchiveName)
        for sArchiveName in Storage.GetFilesSorted(self.ExperimentModelFolder)
    ]
def StoreCompressedModels(self):
    """Moves every compressed model archive of the run to the storage
    experiment model folder and removes the emptied run-side folder.
    """
    sTargetFolder = self.StoreSub.ExperimentModelFolder
    Storage.EnsurePathExists(sTargetFolder)
    for sArchive in self.RunSub.ListCompressedModels():
        self.Log.Print("Moving model %s to storage folder %s" % (sArchive, sTargetFolder))
        Storage.MoveFileToFolder(sArchive, sTargetFolder)
    # The run-side model folder should now contain nothing.
    Storage.DeleteEmptyFolder(self.RunSub.ExperimentModelFolder)
def ListSavedResults(self):
    """Lists the saved per-epoch evaluation result files.

    Returns a list of [nEpochNumber, sResultFile, None] entries, ordered by
    file name. Returns an empty list when the results folder does not exist.

    Bug fix: the method previously fell off the end and returned None when
    the folder was missing, inconsistent with ListSavedModels and
    ListCompressedModels, which return empty lists.
    """
    oModelResults = []
    if Storage.IsExistingPath(self.ExperimentResultsFolder):
        sModelResultFiles = Storage.GetFilesSorted(self.ExperimentResultsFolder)
        for sResultFile in sModelResultFiles:
            # The base file name encodes the epoch number.
            _, sFileName, _ = Storage.SplitFileName(sResultFile)
            nEpochNumber = int(sFileName)
            oModelResults.append([nEpochNumber, sResultFile, None])
    return oModelResults
def CompressModels(self, p_nEpochNumbers):
    """Compresses the saved model folder of each given epoch into a zip
    archive and deletes the uncompressed folder once the archive is
    confirmed to exist on disk.
    """
    sUID = self.ParentExperiment.MinuteUID.UID
    for nEpoch in p_nEpochNumbers:
        sSourceFolder = self.ModelFolderTemplate % nEpoch
        sZipName = "model_%s_epoch_%.3d.zip" % (sUID, nEpoch)
        bCompressed, sArchiveName = Storage.CompressFolder(sSourceFolder, sZipName)
        # Only remove the saved model after verifying the archive really exists;
        # short-circuit keeps the existence check conditional on compression success.
        if bCompressed and Storage.IsExistingFile(sArchiveName):
            self.DeleteSavedModel(nEpoch)
def ListSavedModels(self):
    """Returns [nEpochNumber, sFolder, sModelFiles] for every saved model
    subfolder (sorted); an empty list when no models have been saved.
    """
    oModels = []
    bHasModels = (Storage.IsExistingPath(self.ExperimentModelFolder)
                  and not Storage.IsFolderEmpty(self.ExperimentModelFolder))
    if bHasModels:
        for sModelName in Storage.GetDirectoriesSorted(self.ExperimentModelFolder):
            sFolder = Storage.JoinPath(self.ExperimentModelFolder, sModelName)
            # Subfolder names are the (zero-padded) epoch numbers.
            oModels.append([int(sModelName), sFolder, Storage.GetFilesSorted(sFolder)])
    return oModels
def GetNextConfigToEvaluate(self):
    """Returns the full path of the first pending .cfg file in the
    evaluation queue folder, or None when none is waiting.

    Bug fix: the guard previously tested len(sFiles) (all files) instead of
    len(sConfigFiles) (.cfg files only), which raised IndexError whenever
    the folder contained files but none with a .cfg extension.
    """
    sFiles = Storage.GetFilesSorted(self.ToEvaluteFolder)
    sConfigFiles = []
    for sFile in sFiles:
        _, _, sExt = Storage.SplitFileName(sFile)
        if sExt == ".cfg":
            sConfigFiles.append(
                Storage.JoinPath(self.ToEvaluteFolder, sFile))
    if len(sConfigFiles) > 0:
        sResult = sConfigFiles[0]
    else:
        sResult = None
    return sResult
def __loadAll(self):
    """Loads every evaluation result file and fills the per-epoch metric
    arrays (Accuracy, Recall, Precision, F1Score and, for binary problems,
    the cross/objective/positive F1 derivatives).
    """
    for nIndex, sFileRec in enumerate(self.ResultFiles):
        # sFileRec is [sFile, sFileNameFull]; the bare name encodes the epoch.
        _, sEpochNumber, _ = Storage.SplitFileName(sFileRec[0])
        sFileNameFull = sFileRec[1]
        oMetrics = ClassificationMetrics()
        oMetrics.Load(sFileNameFull)
        print(
            "Accuracy:%f Top%dAccuracy%s" %
            (oMetrics.Accuracy, oMetrics.TopKappa, oMetrics.TopKAccuracy))
        self.EpochNumber[nIndex] = int(sEpochNumber)
        self.Accuracy[nIndex] = oMetrics.Accuracy
        self.Recall[nIndex] = oMetrics.AverageRecall
        self.Precision[nIndex] = oMetrics.AveragePrecision
        self.F1Score[nIndex] = oMetrics.AverageF1Score
        if oMetrics.ClassCount == 2:
            self.IsBinary = True
            # Cross entropy of the F1 scores for binary classification.
            # NOTE(review): np.log10(0) yields -inf when either class F1 is
            # zero, which propagates into ObjectiveF1Score below — confirm
            # the per-class F1 scores are strictly positive here.
            self.CrossF1Score[nIndex] = -(
                oMetrics.F1Score[0] * np.log10(oMetrics.F1Score[1]) +
                oMetrics.F1Score[1] * np.log10(oMetrics.F1Score[0]))
            self.ObjectiveF1Score[
                nIndex] = self.F1Score[nIndex] / self.CrossF1Score[nIndex]
            # Special binary classification, with the class 0 the class positives
            self.PositiveF1Score[nIndex] = oMetrics.F1Score[0]
            print(sEpochNumber, oMetrics.F1Score[0], oMetrics.F1Score[1],
                  self.CrossF1Score[nIndex], self.ObjectiveF1Score[nIndex])
def __determineCommonCaltechClassesForLITE(self):
    """Intersects the Caltech-101 class names with the ImageNet synset
    descriptions and populates the parallel class lists (ClassCodes,
    ClassDescr, CaltechClassDescr, ImageNetClassID, ImageNetClassDescr).
    """
    # Read the Caltech-101 class names, one per line.
    sCaltechClasses = []
    with open(
            Storage.JoinPath(self.DataSetFolder.SourceFolder,
                             "caltech101-classes.txt"), "r") as oFile:
        for sLine in oFile:
            sCaltechClasses.append(sLine.strip())
    if type(self).__verboseLevel >= 1:
        print("Caltech classes: %d" % len(sCaltechClasses))
    self.ClassCodes = []
    for nIndex, sClassCode in enumerate(self.ImageNetClassCodes):
        # Comma-separated synonym descriptions of the ImageNet synset.
        sClassDescriptions = self.ImageNetSynSetDict[sClassCode]
        # Cheap substring screen before the exact comma-tokenized match below.
        bFound = any(sClass in sClassDescriptions for sClass in sCaltechClasses)
        if bFound:
            sDescriptions = sClassDescriptions.split(",")
            for sClass in sCaltechClasses:
                # Exact match of the Caltech name against each trimmed synonym.
                sFound = [[sDescr] for sDescr in sDescriptions
                          if sClass == sDescr.strip()]
                if len(sFound) != 0:
                    self.ClassCodes.append(sClassCode)
                    self.ClassDescr.append(sClass)
                    self.CaltechClassDescr.append(sClass)
                    # ImageNet class IDs are 1-based.
                    self.ImageNetClassID.append(nIndex + 1)
                    self.ImageNetClassDescr.append(sClassDescriptions)
def Save(self, p_sFileName):
    """Serializes the classification metrics snapshot to the given file."""
    dSnapshot = {}
    dSnapshot["FileFormat"] = "TALOS008"
    dSnapshot["Kind"] = self.Kind
    dSnapshot["IDs"] = self.IDs
    # Ground truth and predictions.
    dSnapshot["Actual"] = self.ActualClasses
    dSnapshot["Predicted"] = self.PredictedClasses
    dSnapshot["PredictedProbsTop"] = self.PredictedProbsTop
    # Aggregate accuracy figures.
    dSnapshot["TopKappa"] = self.TopKappa
    dSnapshot["Accuracy"] = self.Accuracy
    dSnapshot["TopKAccuracy"] = self.TopKAccuracy
    # Averaged (macro) metrics.
    dSnapshot["AveragePrecision"] = self.AveragePrecision
    dSnapshot["AverageRecall"] = self.AverageRecall
    dSnapshot["AverageF1Score"] = self.AverageF1Score
    dSnapshot["AverageSupport"] = self.AverageSupport
    # Per-class metrics.
    dSnapshot["ClassPrecision"] = self.Precision
    dSnapshot["ClassRecall"] = self.Recall
    dSnapshot["ClassF1Score"] = self.F1Score
    dSnapshot["ClassSupport"] = self.Support
    dSnapshot["ConfusionMatrix"] = self.ConfusionMatrix
    Storage.SerializeObjectToFile(p_sFileName, dSnapshot)
def Save(self, p_sFileName):
    """Persists the per-epoch evaluation series and the selected best-model
    statistics, overwriting any previously saved file.
    """
    dResults = {"FileFormat": "TALOS008", "IsBinary": self.IsBinary}
    # Full per-epoch metric series.
    dResults.update({
        "EpochNumber": self.EpochNumber,
        "FileNames": self.FileNames,
        "Accuracy": self.Accuracy,
        "Recall": self.Recall,
        "Precision": self.Precision,
        "F1Score": self.F1Score,
        "CrossF1Score": self.CrossF1Score,
        "ObjectiveF1Score": self.ObjectiveF1Score,
        "PositiveF1Score": self.PositiveF1Score,
    })
    # Metric values at the selected best epochs.
    dResults.update({
        "BestEpochs": self.BestEpochs,
        "BestPoints": self.BestPoints,
        "BestRecall": self.BestRecall,
        "BestPrecision": self.BestPrecision,
        "BestF1Score": self.BestF1Score,
        "BestCrossF1Score": self.BestCrossF1Score,
        "BestObjectiveF1Score": self.BestObjectiveF1Score,
        "BestPositiveF1Score": self.BestPositiveF1Score,
    })
    # Epoch selections behind each "best" figure.
    dResults.update({
        "DiscardedEpochs": self.DiscardedEpochs,
        "BestRecallEpochs": self.BestRecallEpochs,
        "BestPrecisionEpochs": self.BestPrecisionEpochs,
        "BestF1ScoreEpochs": self.BestF1ScoreEpochs,
        "BestCrossF1ScoreEpochs": self.BestCrossF1ScoreEpochs,
        "BestObjectiveF1ScoreEpochs": self.BestObjectiveF1ScoreEpochs,
        "BestPositiveScoreEpochs": self.BestPositiveScoreEpochs,
    })
    Storage.SerializeObjectToFile(p_sFileName, dResults, p_bIsOverwritting=True)
def Load(self, p_sFileName):
    """Restores the classification metrics snapshot from the given file.

    Raises AssertionError when the file cannot be deserialized (not found).
    """
    oData = Storage.DeserializeObjectFromFile(p_sFileName)
    assert oData is not None, "Evaluation results file not found %s" % p_sFileName
    self.IDs = oData["IDs"]
    self.Kind = oData["Kind"]
    self.ActualClasses = oData["Actual"]
    self.PredictedClasses = oData["Predicted"]
    self.PredictedProbsTop = oData["PredictedProbsTop"]
    if self.PredictedProbsTop is not None:
        # The number of probability columns determines the top-K count.
        self.TopCount = self.PredictedProbsTop.shape[1]
    # Optional keys: only present in some saved files; attributes keep their
    # prior values when absent.
    if "TopKappa" in oData:
        self.TopKappa = oData["TopKappa"]
    if "Accuracy" in oData:
        self.Accuracy = oData["Accuracy"]
    if "TopKAccuracy" in oData:
        self.TopKAccuracy = oData["TopKAccuracy"]
    self.AveragePrecision = oData["AveragePrecision"]
    self.AverageRecall = oData["AverageRecall"]
    self.AverageF1Score = oData["AverageF1Score"]
    self.AverageSupport = oData["AverageSupport"]
    #self.Top1Error = oData["Top1Error"]
    #self.Top5Error = oData["Top5Error"]
    self.Precision = oData["ClassPrecision"]
    self.Recall = oData["ClassRecall"]
    self.F1Score = oData["ClassF1Score"]
    self.Support = oData["ClassSupport"]
    self.ConfusionMatrix = oData["ConfusionMatrix"]
    # Recall is a per-class array; its length gives the class count.
    self.ClassCount = self.Recall.shape[0]
def GetConfig(cls, p_sFolder):
    """Loads the used learn configuration found under <p_sFolder>/config.

    Returns the parsed NNLearnConfig, or None when the config folder or a
    "learn-config-used*" file is missing.
    """
    oResult = None
    sConfigFolder = Storage.JoinPath(p_sFolder, "config")
    if Storage.IsExistingPath(sConfigFolder):
        # First "learn-config-used*" file in sorted order wins.
        sFileName = None
        for sCandidate in Storage.GetFilesSorted(sConfigFolder):
            if sCandidate.startswith("learn-config-used"):
                sFileName = Storage.JoinPath(sConfigFolder, sCandidate)
                break
        if sFileName is not None:
            oResult = NNLearnConfig()
            oResult.LoadFromFile(sFileName)
            oResult.ParseUID()
    return oResult
def SetTemplateName(self, p_sName):
    """Sets the template configuration file name (defaulting to
    "template.cfg") and refreshes the full path inside the edit folder.
    """
    self.TemplateName = "template.cfg" if p_sName is None else p_sName
    self.TemplateConfigFileName = Storage.JoinPath(self.EditFolder, self.TemplateName)
def __sequenceFoldNumber(self, p_sSourceFileName, p_nFoldSequence, p_nCounter):
    """Expands a template configuration into one pending .cfg file per fold
    number in the sequence.

    Returns (nCounter, sResult): the advanced run counter and the list of
    created file names.

    Bug fix: destination file names previously embedded the stale
    p_nCounter for every generated file, so all files of one sequence
    shared the same numeric prefix even though the counter returned to the
    caller advanced per file; the running nCounter is used now.
    """
    print(" -> Sequencing fold numbers from template")
    sResult = []
    nCounter = p_nCounter
    for nFoldNumber in p_nFoldSequence:
        # Fresh copy of the template with the fold number overridden.
        oNewConfig = NNLearnConfig()
        oNewConfig.LoadFromFile(p_sSourceFileName)
        oNewConfig.FoldNumber = nFoldNumber
        _, sName, _ = Storage.SplitFileName(p_sSourceFileName)
        sDestFileName = Storage.JoinFileName(
            self.PendingFolder,
            "%.3d-" % nCounter + sName + "-fold%d" % nFoldNumber,
            ".cfg")
        sResult.append(sDestFileName)
        oNewConfig.SaveToFile(sDestFileName)
        nCounter += 1
    return nCounter, sResult
def __sequenceLearningRate(self, p_sSourceFileName, p_nLearningRateSequence, p_nCounter):
    """Expands a template configuration into one pending .cfg file per
    learning rate in the sequence.

    Returns (nCounter, sResult): the advanced run counter and the list of
    created file names.

    Bug fix: destination file names previously embedded the stale
    p_nCounter for every generated file, so all files of one sequence
    shared the same numeric prefix even though the counter returned to the
    caller advanced per file; the running nCounter is used now.
    """
    print(" -> Sequencing learning rates from template")
    sResult = []
    nCounter = p_nCounter
    for nLearningRate in p_nLearningRateSequence:
        # Fresh copy of the template with the learning rate overridden.
        oNewConfig = NNLearnConfig()
        oNewConfig.LoadFromFile(p_sSourceFileName)
        oNewConfig.Learn.LearningRate = nLearningRate
        _, sName, _ = Storage.SplitFileName(p_sSourceFileName)
        sDestFileName = Storage.JoinFileName(
            self.PendingFolder,
            "%.3d-" % nCounter + sName + "-lr%.6f" % nLearningRate,
            ".cfg")
        sResult.append(sDestFileName)
        oNewConfig.SaveToFile(sDestFileName)
        nCounter += 1
    return nCounter, sResult
def __loadClassesFromDisk(self):
    """Loads the dataset class catalog and per-subset bookkeeping from the
    serialized classes file.

    Returns True on success; raises Exception when the classes file is
    missing under the dataset base folder.
    """
    bResult = Storage.IsExistingFile(self.DataSetFolder.ClassesFile)
    if bResult:
        oData = Storage.DeserializeObjectFromFile(
            self.DataSetFolder.ClassesFile)
        self.ClassCodes = oData["ClassCodes"]
        self.ClassDescr = oData["ClassDescr"]
        self.ClassCount = len(self.ClassCodes)
        # Descriptions must pair 1:1 with class codes.
        assert len(
            self.ClassDescr
        ) == self.ClassCount, "incorrect count of class descriptions %d" % len(
            self.ClassDescr)
        # Per-subset class folders.
        self.Train.ClassFolders = oData["ClassFoldersTrain"]
        self.Validation.ClassFolders = oData["ClassFoldersVal"]
        self.Testing.ClassFolders = oData["ClassFoldersTest"]
        # Per-subset available sample counts.
        self.Train.ClassSamplesAvailable = oData[
            "ClassSamplesAvailableTrain"]
        self.Validation.ClassSamplesAvailable = oData[
            "ClassSamplesAvailableVal"]
        self.Testing.ClassSamplesAvailable = oData[
            "ClassSamplesAvailableTest"]
        # Which subsets are present in this dataset.
        self.Train.IsActive = oData["HasTrain"]
        self.Validation.IsActive = oData["HasVal"]
        self.Testing.IsActive = oData["HasTest"]
        # Caltech/ImageNet cross-reference lists.
        self.CaltechClassDescr = oData["CaltechClassDescr"]
        self.ImageNetClassID = oData["ImageNetClassID"]
        self.ImageNetClassCodes = oData["ImageNetClassCodes"]
        self.ImageNetClassDescr = oData["ImageNetClassDescr"]
        self.TrainSamplesPerClass = oData["TrainSamplesPerClass"]
        self.PageSize = oData["PageSize"]
        self.Log.Print(" |__ Classes: %d" % self.ClassCount)
    else:
        raise Exception("No dataset found under %s" % self.DataSetFolder.BaseFolder)
    return bResult
def __readCounter(self):
    """ Gets the current run/evaluation counter """
    self.Counter = Storage.DeserializeObjectFromFile(self.CountersFileName)
    if self.Counter is not None:
        return self.Counter["RunCounter"]
    # No persisted counter yet: start a fresh counter structure at 1.
    self.Counter = {"FormatVersion": "TALOS10", "RunCounter": 1}
    return 1
def GetNextConfig(self):
    """Returns the next configuration file to process: the first pending
    evaluation config when one exists, otherwise the first pending training
    config; None when nothing is queued.

    Bug fix: the guard previously tested len(sFiles) (all files) instead of
    len(sConfigFiles) (.cfg files only), which raised IndexError whenever
    the pending folder contained files but none with a .cfg extension.
    """
    # By priority first evaluates models to save disk space and then start training
    sResult = self.GetNextConfigToEvaluate()
    if sResult is None:
        sFiles = Storage.GetFilesSorted(self.PendingFolder)
        sConfigFiles = []
        for sFile in sFiles:
            _, _, sExt = Storage.SplitFileName(sFile)
            if sExt == ".cfg":
                sConfigFiles.append(
                    Storage.JoinPath(self.PendingFolder, sFile))
        if len(sConfigFiles) > 0:
            sResult = sConfigFiles[0]
        else:
            sResult = None
    return sResult
def Initialize(self, p_sCustomBaseFolder=None):
    """Resolves each experiment to compare, loads its stats file, and fills
    the comparison series (Envs, Stats, Epochs, ModelTitles).

    p_sCustomBaseFolder: when given, experiment entries are treated as
    subfolders under this base; otherwise they are opened as ERL strings.
    """
    if self.Metrics is None:
        self.Metrics = self.Settings.Metrics
        self.SerieLabels = self.Settings.Titles
    if self.ExperimentsToCompare is None:
        self.ExperimentsToCompare = self.Settings.ExperimentsToCompare
    self.Epochs = np.zeros(
        len(self.ExperimentsToCompare) + 1, np.int32)
    self.ModelTitles = []
    for nIndex, sExperimentERL in enumerate(self.ExperimentsToCompare):
        if p_sCustomBaseFolder is not None:
            # Here a subfolder is given and the custom base folder is prepended
            sExperimentFolder = Storage.JoinPath(p_sCustomBaseFolder,
                                                 sExperimentERL)
            oExperiment = ExperimentFolder.GetExperiment(
                sExperimentFolder, p_sCustomBaseFolder)
            assert oExperiment is not None, "Experiment folder %s not found" % sExperimentFolder
            # Sets the config that is needed to return architecture and dataset for the learn comparison
            if self.Settings.Config is None:
                self.Settings.Config = oExperiment.LearnConfig
        else:
            # ERL-based lookup using the configured learn config.
            oExperiment = ExperimentFolder(
                p_oLearnConfig=self.Settings.Config)
            oExperiment.OpenERL(p_sERLString=sExperimentERL)
        #nFoldNumber, sUID = ExperimentFolder.SplitExperimentCode(oExperimentCode)
        #oExperiment = ExperimentFolder(p_oLearnConfig=self.Settings.Config)
        #oExperiment.Open(nFoldNumber, sUID)
        dStats = Storage.DeserializeObjectFromFile(
            oExperiment.RunSub.StatsFileName)
        assert dStats is not None, "File not found %s" % oExperiment.RunSub.StatsFileName
        self.Envs.append(oExperiment)
        self.Stats.append(dStats)
        # Stats store the next epoch number; the last completed one is -1.
        self.Epochs[nIndex] = dStats["EpochNumber"] - 1
        #nFoldNumber, sUID = ExperimentFolder.SplitExperimentCode(oExperiment.Code)
        self.ModelTitles.append(
            self.Settings.ExperimentDescriptions[nIndex] +
            " (%s)" % oExperiment.ERL.ExperimentUID)
def __listFiles(self):
    """Scans the results folder and allocates one float32 metric slot per
    evaluation result file; the arrays are filled by the loading pass.
    """
    self.FileNames = []
    self.ResultFiles = []
    for sFile in Storage.GetFilesSorted(self.Folder):
        sFullPath = Storage.JoinPath(self.Folder, sFile)
        self.FileNames.append(sFullPath)
        # Keep both the bare name (epoch encoding) and the full path.
        self.ResultFiles.append([sFile, sFullPath])
    nFileCount = len(self.ResultFiles)
    # Allocate the nine parallel per-file metric arrays.
    for sSeriesName in ("EpochNumber", "Accuracy", "Recall", "Precision",
                        "F1Score", "Points", "CrossF1Score",
                        "ObjectiveF1Score", "PositiveF1Score"):
        setattr(self, sSeriesName, np.zeros((nFileCount), np.float32))
def __init__(self, p_oParent, p_nFoldNumber, p_bIsRun=False):
    #........ | Instance Attributes | ..............................................
    self.ParentExperiment = p_oParent
    self.FoldNumber = p_nFoldNumber
    self.IsRun = p_bIsRun
    # Run folders live under the run base, stored folders under the base folder.
    sBaseFolder = (self.ParentExperiment.RunBaseFolder
                   if self.IsRun else self.ParentExperiment.BaseFolder)
    self.Folder = os.path.join(sBaseFolder, "fold%.2d" % self.FoldNumber)
    Storage.EnsurePathExists(self.Folder)
    # The most recent experiment UID is the last sorted subfolder, if any.
    sFolders = Storage.GetDirectoriesSorted(self.Folder)
    self.LastUID = sFolders[-1] if sFolders else ExperimentSubFolder.NO_SUBFOLDERS
    self.__pathsToEnsure = None
def AddConfig(self, p_sConfigFileName=None):
    """Queues a configuration file (the template config when none is given)
    into the pending folder, expanding learning-rate or fold sequences into
    multiple counter-prefixed .cfg files.

    Returns the list of created pending file names.

    Cleanup: removed a stale commented-out copy of the fold-sequencing loop
    that is implemented by __sequenceFoldNumber.
    """
    if p_sConfigFileName is None:
        sSourceFileName = self.TemplateConfigFileName
    else:
        sSourceFileName = p_sConfigFileName
    _, sName, _ = Storage.SplitFileName(sSourceFileName)
    # Gets the current run/evaluation counter
    nCounter = self.__readCounter()
    oConfig = NNLearnConfig()
    oConfig.LoadFromFile(sSourceFileName)
    sDestFileNames = None
    if oConfig.LearningRateSequence is not None:
        # One pending config per learning rate in the sequence.
        nCounter, sDestFileNames = self.__sequenceLearningRate(
            sSourceFileName, oConfig.LearningRateSequence, nCounter)
    elif oConfig.FoldSequence is not None:
        # One pending config per fold number in the sequence.
        nCounter, sDestFileNames = self.__sequenceFoldNumber(
            sSourceFileName, oConfig.FoldSequence, nCounter)
    else:
        # Single config: copy it under a counter-prefixed name.
        sDestFileNames = [
            Storage.JoinFileName(self.PendingFolder,
                                 "%.3d-" % nCounter + sName, ".cfg")
        ]
        Storage.CopyFile(sSourceFileName, sDestFileNames[0])
        nCounter += 1
    # Saves the current run/evaluation counter
    # NOTE(review): the locally advanced nCounter is not visibly written back
    # into self.Counter before persisting — confirm __writeCounter picks up
    # the advanced value elsewhere.
    self.__writeCounter()
    return sDestFileNames
def ExportToText(self, p_sTextFileName, p_oExperiment=None):
    """Writes the best-model summary to a text file; when an experiment is
    given, appends its used learn configuration and training log.
    """
    bIsAppending = p_oExperiment is not None
    if bIsAppending:
        # Pull the config and log text of the experiment that produced the results.
        sLearningConfigLines = Storage.ReadTextFile(
            p_oExperiment.RunSub.LearnConfigUsedFileName)
        sLearningLogLines = Storage.ReadTextFile(
            p_oExperiment.RunSub.LogFileName)
    # Dumps the class folders to a text file
    with open(p_sTextFileName, "w") as oOutFile:
        print("=" * 80, file=oOutFile)
        print("epochs :", self.BestEpochs, file=oOutFile)
        print("points :", self.BestPoints, file=oOutFile)
        if self.IsBinary:
            # Binary-only derived F1 figures.
            print("objective f1 ratio", self.BestObjectiveF1Score, file=oOutFile)
            print("cross f1 :", self.BestCrossF1Score, file=oOutFile)
            print("positive f1 :", self.BestPositiveF1Score * 100, file=oOutFile)
        print("recall :", self.BestRecall * 100, file=oOutFile)
        print("precision :", self.BestPrecision * 100, file=oOutFile)
        print("f1 score :", self.BestF1Score * 100, file=oOutFile)
        if bIsAppending:
            # Appends the related configuration that generated the results
            print("-" * 80, file=oOutFile)
            for sLine in sLearningConfigLines:
                print(sLine, file=oOutFile)
            print("=" * 80, file=oOutFile)
            # Appends the log at the end of the best models text file
            for sLine in sLearningLogLines:
                print(sLine, file=oOutFile)
            print("-" * 80, file=oOutFile)
        print("=" * 80, file=oOutFile)
def __init__(self, p_oConfig=None, p_sFileName=None): super(LearningComparisonSettings, self).__init__(p_sFileName) #........................ | Instance Attributes | .............................. self.Config = p_oConfig self.Metrics = [] self.Titles = [] self.ExperimentsToCompare = [] self.ExperimentDescriptions = [] #................................................................................ #self.ExperimentBaseFolder = Storage.JoinPath(BaseFolders.EXPERIMENTS_RUN # , ExperimentFolder.GetExperimentName(self.Config.Architecture, self.Config.DataSetName)) if self.FileName is None: self.FileName = Storage.JoinPath(BaseFolders.EXPERIMENTS_RUN, "learn-comparison.cfg")
def LoadFromERL(self, p_sERLString):
    """Resolves a full ERL string to its used-config file path and loads the
    configuration from that file; asserts when the ERL is incomplete.
    """
    self.ERL = ERLString(p_sERLString)
    self.FileName = None
    if self.ERL.IsFull:
        # <run base>/<dataset>-<arch>/fold<NN>/<UID>/config/learn-config-used-<UID>.cfg
        sPathParts = [
            BaseFolders.EXPERIMENTS_RUN,
            "%s-%s" % (self.ERL.DataSetName, self.ERL.Architecture),
            "fold%.2d" % self.ERL.FoldNumber,
            "%s" % self.ERL.ExperimentUID,
            "config",
            "learn-config-used-%s.cfg" % self.ERL.ExperimentUID,
        ]
        self.FileName = Storage.JoinPaths(sPathParts)
    assert self.FileName is not None, "ERL is not valid"
    self.LoadFromFile()
def ArchiveFileName(self, p_nCounter):
    """Builds the archive file name for this configuration: an 'r'
    (training) or 'e' (evaluation) prefix, a 3-digit counter, then the
    original name and extension.
    """
    _, sName, sExt = Storage.SplitFileName(self.FileName)
    if self.IsTraining == True:
        # Training run
        sPrefix = "r"
    elif (self.IsTraining == False) and ((self.IsEvaluating == True) or
                                         (self.IsDeterminingBest == True)):
        # Evaluation run
        sPrefix = "e"
    else:
        sPrefix = None
    assert sPrefix is not None
    # Prefix is valid for display up to 1000 experiments
    assert p_nCounter < 1000
    return sPrefix + "%.3d-" % p_nCounter + sName + sExt
def Load(self, p_sFileName):
    """Restores the per-epoch evaluation series and best-model statistics
    from a saved results file.

    Raises AssertionError when the file cannot be deserialized (not found).
    """
    oData = Storage.DeserializeObjectFromFile(p_sFileName, p_bIsVerbose=False)
    assert oData is not None, "File %s not found" % p_sFileName
    self.BestEpochs = oData["BestEpochs"]
    self.IsBinary = oData["IsBinary"]
    self.EpochNumber = oData["EpochNumber"]
    self.FileNames = oData["FileNames"]
    self.Accuracy = oData["Accuracy"]
    self.Recall = oData["Recall"]
    self.Precision = oData["Precision"]
    self.F1Score = oData["F1Score"]
    self.CrossF1Score = oData["CrossF1Score"]
    # Optional key: only present in some saved files.
    if "ObjectiveF1Score" in oData:
        self.ObjectiveF1Score = oData["ObjectiveF1Score"]
    self.PositiveF1Score = oData["PositiveF1Score"]
    self.BestPoints = oData["BestPoints"]
    self.BestRecall = oData["BestRecall"]
    self.BestPrecision = oData["BestPrecision"]
    self.BestF1Score = oData["BestF1Score"]
    self.BestCrossF1Score = oData["BestCrossF1Score"]
    # Optional key: only present in some saved files.
    if "BestObjectiveF1Score" in oData:
        self.BestObjectiveF1Score = oData["BestObjectiveF1Score"]
    self.BestPositiveF1Score = oData["BestPositiveF1Score"]
    self.DiscardedEpochs = oData["DiscardedEpochs"]
    self.BestRecallEpochs = oData["BestRecallEpochs"]
    self.BestPrecisionEpochs = oData["BestPrecisionEpochs"]
    self.BestF1ScoreEpochs = oData["BestF1ScoreEpochs"]
    self.BestCrossF1ScoreEpochs = oData["BestCrossF1ScoreEpochs"]
    # Optional key: only present in some saved files.
    if "BestObjectiveF1ScoreEpochs" in oData:
        self.BestObjectiveF1ScoreEpochs = oData[
            "BestObjectiveF1ScoreEpochs"]
    self.BestPositiveScoreEpochs = oData["BestPositiveScoreEpochs"]
def Activate(self):
    """
    Returns
        True : If a new experiment folder is created and the configuration was copied there
        False: If an existing experiment folder is reused
    """
    assert self.LearnConfig is not None, "Method requires a learn configuration."
    if self.LearnConfig.SavedExperimentUID is not None:
        # Reuse the previously saved experiment folder.
        self.Open(self.LearnConfig.FoldNumber,
                  self.LearnConfig.SavedExperimentUID)
        return False
    if not self.LearnConfig.IsTraining:
        # Evaluation-only: open the existing folder, nothing to archive.
        self.Open()
        return False
    # Fresh training run: create the experiment folder.
    self.Begin()
    # Copies the source configuration file to the experiment subfolder "config"
    Storage.CopyFile(self.LearnConfig.FileName,
                     self.RunSub.LearnConfigFileName, True)
    return True