def test_updateTotalNumbLoops(self):
    """Outside of test mode the per-phase loop totals must scale with the
    configured epoch count and the raw dataloader lengths.

    Train total = epoch * 1 * len(trainloader); test total uses a factor
    of 2 (the framework runs the test loop twice per epoch).
    """
    epochs = 7
    meta = dc.DefaultData_Metadata(epoch=epochs)
    data = dc.DefaultDataMNIST(meta)
    data.epochHelper = sf.EpochDataContainer()

    data._updateTotalNumbLoops(meta)

    expectedTrain = epochs * 1 * len(data.trainloader)
    expectedTest = epochs * 2 * len(data.testloader)
    ut.testCmpPandas(data.epochHelper.maxTrainTotalNumber, "max_loops_train", expectedTrain)
    ut.testCmpPandas(data.epochHelper.maxTestTotalNumber, "max_loops_test", expectedTest)
def test_experiment_weightedMean_MNIST_predefModel_alexnet(self):
    """End-to-end smoke test: predefined AlexNet on MNIST with the
    weighted-mean oscillation smoothing, executed under framework test mode
    (shortened loops)."""
    with sf.test_mode():
        modelName = "alexnet"
        metadata = sf.Metadata(testFlag=True, trainFlag=True, debugInfo=True)
        dataMetadata = dc.DefaultData_Metadata(
            pin_memoryTest=True, pin_memoryTrain=True, epoch=1,
            test_howOftenPrintTrain=2, howOftenPrintTrain=3,
            resizeTo=Test_RunExperiment.MNIST_RESIZE)

        # Optimizer hyperparameters shared between the metadata dict and SGD.
        optimizerDataDict = {"learning_rate": 1e-3, "momentum": 0.9}

        # NOTE(review): the smoothing runs on 'cpu' while the model runs on
        # 'cuda:0' — presumably intentional for this test; confirm.
        smoothingMetadata = dc.Test_DefaultSmoothingOscilationWeightedMean_Metadata(
            test_weightIter=dc.DefaultWeightDecay(1.05), test_device='cpu',
            test_epsilon=1e-5, test_hardEpsilon=1e-7, test_weightsEpsilon=1e-6,
            test_weightSumContainerSize=3, test_weightSumContainerSizeStartAt=1,
            test_lossContainer=20, test_lossContainerDelayedStartAt=10)
        modelMetadata = dc.DefaultModel_Metadata(
            lossFuncDataDict={}, optimizerDataDict=optimizerDataDict,
            device='cuda:0')

        data = dc.DefaultDataMNIST(dataMetadata)
        smoothing = dc.DefaultSmoothingOscilationWeightedMean(smoothingMetadata)
        model = dc.DefaultModelPredef(
            obj=models.alexnet(), modelMetadata=modelMetadata, name=modelName)
        optimizer = optim.SGD(
            model.getNNModelModule().parameters(),
            lr=optimizerDataDict['learning_rate'],
            momentum=optimizerDataDict['momentum'])
        loss_fn = nn.CrossEntropyLoss()

        stat = dc.run(
            metadataObj=metadata, data=data, model=model, smoothing=smoothing,
            optimizer=optimizer, lossFunc=loss_fn, modelMetadata=modelMetadata,
            dataMetadata=dataMetadata, smoothingMetadata=smoothingMetadata)
def setUp(self):
    """Create a fresh metadata/model/helper fixture before each test.

    Publishes the fully prepared objects as attributes so individual tests
    can mutate them independently.
    """
    meta = sf.Metadata()
    meta.debugInfo = True
    meta.logFolderSuffix = str(time.time())  # unique log folder per run
    meta.debugOutput = 'debug'
    meta.prepareOutput()
    self.metadata = meta

    self.modelMetadata = TestModel_Metadata()
    self.model = TestModel(self.modelMetadata)
    self.helper = sf.TrainDataContainer()
    self.dataMetadata = dc.DefaultData_Metadata()

    epoch = sf.EpochDataContainer()
    epoch.trainTotalNumber = 3
    epoch.maxTrainTotalNumber = 1000
    self.helperEpoch = epoch
def setUp(self):
    """Create a fresh fixture before each test, including the borderline
    smoothing metadata used by this test class."""
    meta = sf.Metadata()
    meta.debugInfo = True
    meta.logFolderSuffix = str(time.time())  # unique log folder per run
    meta.debugOutput = 'debug'
    meta.prepareOutput()
    self.metadata = meta

    self.modelMetadata = TestModel_Metadata()
    self.model = TestModel(self.modelMetadata)
    self.helper = sf.TrainDataContainer()
    self.smoothingMetadata = dc.Test_DefaultSmoothingBorderline_Metadata(
        test_numbOfBatchAfterSwitchOn=2)
    self.dataMetadata = dc.DefaultData_Metadata()

    epoch = sf.EpochDataContainer()
    epoch.trainTotalNumber = 3
    self.helperEpoch = epoch
def test_updateTotalNumbLoops_testMode(self):
    """Inside test mode the loop totals are capped by
    sf.StaticData.MAX_DEBUG_LOOPS instead of the real loader lengths."""
    epochs = 7
    with sf.test_mode():
        meta = dc.DefaultData_Metadata(epoch=epochs)
        data = dc.DefaultDataMNIST(meta)
        data.epochHelper = sf.EpochDataContainer()

        data._updateTotalNumbLoops(meta)

        capped = epochs * sf.StaticData.MAX_DEBUG_LOOPS
        ut.testCmpPandas(data.epochHelper.maxTrainTotalNumber, "max_loops_train", capped * 1)
        ut.testCmpPandas(data.epochHelper.maxTestTotalNumber, "max_loops_test", capped * 2)
def test_experiment_borderline_MNIST_predefModel_wide_resnet(self):
    """End-to-end smoke test: predefined wide_resnet50_2 on MNIST with the
    borderline smoothing, executed under framework test mode."""
    with sf.test_mode():
        modelName = "wide_resnet"
        metadata = sf.Metadata(testFlag=True, trainFlag=True, debugInfo=True)
        dataMetadata = dc.DefaultData_Metadata(
            pin_memoryTest=True, pin_memoryTrain=True, epoch=1,
            test_howOftenPrintTrain=2, howOftenPrintTrain=3,
            resizeTo=Test_RunExperiment.MNIST_RESIZE)

        # Optimizer hyperparameters shared between the metadata dict and SGD.
        optimizerDataDict = {"learning_rate": 1e-3, "momentum": 0.9}

        smoothingMetadata = dc.Test_DefaultSmoothingBorderline_Metadata(
            test_numbOfBatchAfterSwitchOn=5, test_device='cuda:0')
        modelMetadata = dc.DefaultModel_Metadata(
            lossFuncDataDict={}, optimizerDataDict=optimizerDataDict,
            device='cuda:0')

        data = dc.DefaultDataMNIST(dataMetadata)
        smoothing = dc.DefaultSmoothingBorderline(smoothingMetadata)
        model = dc.DefaultModelPredef(
            obj=models.wide_resnet50_2(), modelMetadata=modelMetadata,
            name=modelName)
        optimizer = optim.SGD(
            model.getNNModelModule().parameters(),
            lr=optimizerDataDict['learning_rate'],
            momentum=optimizerDataDict['momentum'])
        loss_fn = nn.CrossEntropyLoss()

        stat = dc.run(
            metadataObj=metadata, data=data, model=model, smoothing=smoothing,
            optimizer=optimizer, lossFunc=loss_fn, modelMetadata=modelMetadata,
            dataMetadata=dataMetadata, smoothingMetadata=smoothingMetadata)
def test_experiment_pytorchSWA_CIFAR10_predefModel_alexnet(self):
    # NOTE(review): the test name claims CIFAR10 + a predefined AlexNet, but
    # the body builds DefaultDataMNIST and DefaultModelSimpleConv ("simpleConv").
    # Confirm whether the name or the body is stale and align them.
    """End-to-end smoke test of the PyTorch averaged (SWA-style) smoothing,
    run under framework test mode (shortened loops)."""
    with sf.test_mode():
        modelName = "simpleConv"
        metadata = sf.Metadata(testFlag=True, trainFlag=True, debugInfo=True)
        dataMetadata = dc.DefaultData_Metadata(pin_memoryTest=True, pin_memoryTrain=True, epoch=1,
            test_howOftenPrintTrain=2, howOftenPrintTrain=3)
        # Optimizer hyperparameters shared between the metadata dict and SGD.
        optimizerDataDict = {"learning_rate": 1e-3, "momentum": 0.9}
        smoothingMetadata = dc.Test_DefaultPytorchAveragedSmoothing_Metadata(
            test_device='cuda:0')
        modelMetadata = dc.DefaultModel_Metadata(
            lossFuncDataDict={}, optimizerDataDict=optimizerDataDict, device='cuda:0')
        data = dc.DefaultDataMNIST(dataMetadata)
        model = dc.DefaultModelSimpleConv(modelMetadata=modelMetadata)
        # This smoothing variant wraps the model itself (averaged weights).
        smoothing = dc.DefaultPytorchAveragedSmoothing(smoothingMetadata, model=model)
        optimizer = optim.SGD(model.getNNModelModule().parameters(),
            lr=optimizerDataDict['learning_rate'],
            momentum=optimizerDataDict['momentum'])
        loss_fn = nn.CrossEntropyLoss()
        stat = dc.run(metadataObj=metadata, data=data, model=model, smoothing=smoothing,
            optimizer=optimizer, lossFunc=loss_fn, modelMetadata=modelMetadata,
            dataMetadata=dataMetadata, smoothingMetadata=smoothingMetadata)
import torch import torchvision import torch.optim as optim from framework import smoothingFramework as sf import matplotlib.pyplot as plt import torchvision.transforms as transforms import torch.nn as nn import torch.nn.functional as F import torchvision.models as models from framework import defaultClasses as dc if(__name__ == '__main__'): #sf.StaticData.TEST_MODE = True metadata = sf.Metadata(testFlag=True, trainFlag=True, debugInfo=True) dataMetadata = dc.DefaultData_Metadata(pin_memoryTest=True, pin_memoryTrain=True, fromGrayToRGB=False) loop = 5 ##################### types = ('predefModel', 'CIFAR10', 'borderline') try: stats = [] rootFolder = sf.Output.getTimeStr() + ''.join(x + "_" for x in types) + "set" for r in range(loop): obj = models.alexnet() metadata.resetOutput() smoothingMetadata = dc.DefaultSmoothingBorderline_Metadata(numbOfBatchAfterSwitchOn=2000) modelMetadata = dc.DefaultModel_Metadata() stat=dc.run(numbOfRepetition=1, modelType=types[0], dataType=types[1], smoothingType=types[2], metadataObj=metadata,
import matplotlib.pyplot as plt import torchvision.transforms as transforms import torch.nn as nn import torch.nn.functional as F import torchvision.models as models from framework import defaultClasses as dc if (__name__ == '__main__'): #sf.StaticData.TEST_MODE = True # pin_memory = False - na serwerze inaczej występuje Warning: Leaking Caffe2 thread-pool after fork. # więcej w wątku https://github.com/pytorch/pytorch/issues/57273 metadata = sf.Metadata(testFlag=True, trainFlag=True, debugInfo=True) dataMetadata = dc.DefaultData_Metadata(pin_memoryTest=False, pin_memoryTrain=False, epoch=2, fromGrayToRGB=True) loop = 5 modelName = "wide_resnet50_2" prefix = "epsilons_" types = ('predefModel', 'MNIST', 'movingMean') try: stats = [] rootFolder = prefix + sf.Output.getTimeStr() + ''.join( x + "_" for x in types) + "set" smoothingMetadata = dc.DefaultSmoothingOscilationEWMA_Metadata( movingAvgParam=0.15, epsilon=1e-1, hardEpsilon=1e-3, weightsEpsilon=1e-2,
import matplotlib.pyplot as plt from framework import smoothingFramework as sf from framework import defaultClasses as dc # wzorowane na pracy https://paperswithcode.com/paper/wide-residual-networks # model wzorowany na resnet18 https://github.com/huyvnphan/PyTorch_CIFAR10/blob/master/module.py if(__name__ == '__main__'): modelDevice = 'cuda:0' if(sf.test_mode().isActive()): modelDevice="cuda:0" metadata = sf.Metadata(testFlag=True, trainFlag=True, debugInfo=True) dataMetadata = dc.DefaultData_Metadata(pin_memoryTest=False, pin_memoryTrain=False, epoch=100, fromGrayToRGB=False, batchTrainSize=125, batchTestSize=125, startTestAtEpoch=[0, 24, 44, 74, 99]) optimizerDataDict={"learning_rate":0.1, "momentum":0.9, "weight_decay":0.001} modelMetadata = dc.DefaultModel_Metadata(device=modelDevice, lossFuncDataDict={}, optimizerDataDict=optimizerDataDict) loop = 5 modelName = "wide_resnet" prefix = "set_copyOfExper_" runningAvgSize = 10 num_classes = 10 layers = [2, 2, 2, 2] block = modResnet.BasicBlock types = ('predefModel', 'CIFAR10', 'disabled') try: stats = [] rootFolder = prefix + sf.Output.getTimeStr() + ''.join(x + "_" for x in types)