# Imports assumed by the test methods below (the excerpt omits the file
# header); `ut`, which provides testCmpPandas, comes from the project's
# test utilities, whose exact import path is not shown here.
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models

from framework import smoothingFramework as sf
from framework import defaultClasses as dc


def test_experiment_weightedMean_MNIST_predefModel_alexnet(self):
    with sf.test_mode():
        modelName = "alexnet"
        metadata = sf.Metadata(testFlag=True, trainFlag=True, debugInfo=True)
        dataMetadata = dc.DefaultData_Metadata(
            pin_memoryTest=True, pin_memoryTrain=True, epoch=1,
            test_howOftenPrintTrain=2, howOftenPrintTrain=3,
            resizeTo=Test_RunExperiment.MNIST_RESIZE)
        optimizerDataDict = {"learning_rate": 1e-3, "momentum": 0.9}
        obj = models.alexnet()
        smoothingMetadata = dc.Test_DefaultSmoothingOscilationWeightedMean_Metadata(
            test_weightIter=dc.DefaultWeightDecay(1.05), test_device='cpu',
            test_epsilon=1e-5, test_hardEpsilon=1e-7, test_weightsEpsilon=1e-6,
            test_weightSumContainerSize=3, test_weightSumContainerSizeStartAt=1,
            test_lossContainer=20, test_lossContainerDelayedStartAt=10)
        modelMetadata = dc.DefaultModel_Metadata(
            lossFuncDataDict={}, optimizerDataDict=optimizerDataDict,
            device='cuda:0')

        data = dc.DefaultDataMNIST(dataMetadata)
        smoothing = dc.DefaultSmoothingOscilationWeightedMean(smoothingMetadata)
        model = dc.DefaultModelPredef(obj=obj, modelMetadata=modelMetadata,
                                      name=modelName)
        optimizer = optim.SGD(model.getNNModelModule().parameters(),
                              lr=optimizerDataDict['learning_rate'],
                              momentum=optimizerDataDict['momentum'])
        loss_fn = nn.CrossEntropyLoss()

        stat = dc.run(metadataObj=metadata, data=data, model=model,
                      smoothing=smoothing, optimizer=optimizer, lossFunc=loss_fn,
                      modelMetadata=modelMetadata, dataMetadata=dataMetadata,
                      smoothingMetadata=smoothingMetadata)
def test_updateTotalNumbLoops_testMode(self):
    with sf.test_mode():
        dataMetadata = dc.DefaultData_Metadata(epoch=7)
        data = dc.DefaultDataMNIST(dataMetadata)
        data.epochHelper = sf.EpochDataContainer()
        data._updateTotalNumbLoops(dataMetadata)
        # In test mode each of the 7 epochs is capped at
        # sf.StaticData.MAX_DEBUG_LOOPS iterations; the trailing factors
        # (x1 for train, x2 for test) are the per-epoch loop multipliers
        # the framework applies to each kind of pass.
        ut.testCmpPandas(data.epochHelper.maxTrainTotalNumber, "max_loops_train",
                         7 * sf.StaticData.MAX_DEBUG_LOOPS * 1)
        ut.testCmpPandas(data.epochHelper.maxTestTotalNumber, "max_loops_test",
                         7 * sf.StaticData.MAX_DEBUG_LOOPS * 2)
def test_experiment_borderline_MNIST_predefModel_wide_resnet(self):
    with sf.test_mode():
        modelName = "wide_resnet"
        metadata = sf.Metadata(testFlag=True, trainFlag=True, debugInfo=True)
        dataMetadata = dc.DefaultData_Metadata(
            pin_memoryTest=True, pin_memoryTrain=True, epoch=1,
            test_howOftenPrintTrain=2, howOftenPrintTrain=3,
            resizeTo=Test_RunExperiment.MNIST_RESIZE)
        optimizerDataDict = {"learning_rate": 1e-3, "momentum": 0.9}
        obj = models.wide_resnet50_2()
        smoothingMetadata = dc.Test_DefaultSmoothingBorderline_Metadata(
            test_numbOfBatchAfterSwitchOn=5, test_device='cuda:0')
        modelMetadata = dc.DefaultModel_Metadata(
            lossFuncDataDict={}, optimizerDataDict=optimizerDataDict,
            device='cuda:0')

        data = dc.DefaultDataMNIST(dataMetadata)
        smoothing = dc.DefaultSmoothingBorderline(smoothingMetadata)
        model = dc.DefaultModelPredef(obj=obj, modelMetadata=modelMetadata,
                                      name=modelName)
        optimizer = optim.SGD(model.getNNModelModule().parameters(),
                              lr=optimizerDataDict['learning_rate'],
                              momentum=optimizerDataDict['momentum'])
        loss_fn = nn.CrossEntropyLoss()

        stat = dc.run(metadataObj=metadata, data=data, model=model,
                      smoothing=smoothing, optimizer=optimizer, lossFunc=loss_fn,
                      modelMetadata=modelMetadata, dataMetadata=dataMetadata,
                      smoothingMetadata=smoothingMetadata)
# Note: despite the CIFAR10/alexnet name, the body exercises DefaultDataMNIST
# with the simple convolutional model, as in the original source.
def test_experiment_pytorchSWA_CIFAR10_predefModel_alexnet(self):
    with sf.test_mode():
        modelName = "simpleConv"
        metadata = sf.Metadata(testFlag=True, trainFlag=True, debugInfo=True)
        dataMetadata = dc.DefaultData_Metadata(
            pin_memoryTest=True, pin_memoryTrain=True, epoch=1,
            test_howOftenPrintTrain=2, howOftenPrintTrain=3)
        optimizerDataDict = {"learning_rate": 1e-3, "momentum": 0.9}
        smoothingMetadata = dc.Test_DefaultPytorchAveragedSmoothing_Metadata(
            test_device='cuda:0')
        modelMetadata = dc.DefaultModel_Metadata(
            lossFuncDataDict={}, optimizerDataDict=optimizerDataDict,
            device='cuda:0')

        data = dc.DefaultDataMNIST(dataMetadata)
        model = dc.DefaultModelSimpleConv(modelMetadata=modelMetadata)
        smoothing = dc.DefaultPytorchAveragedSmoothing(smoothingMetadata,
                                                       model=model)
        optimizer = optim.SGD(model.getNNModelModule().parameters(),
                              lr=optimizerDataDict['learning_rate'],
                              momentum=optimizerDataDict['momentum'])
        loss_fn = nn.CrossEntropyLoss()

        stat = dc.run(metadataObj=metadata, data=data, model=model,
                      smoothing=smoothing, optimizer=optimizer, lossFunc=loss_fn,
                      modelMetadata=modelMetadata, dataMetadata=dataMetadata,
                      smoothingMetadata=smoothingMetadata)
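# For context: DefaultPytorchAveragedSmoothing appears to delegate to PyTorch's
# built-in weight averaging. Below is a standalone sketch of the underlying
# torch.optim.swa_utils API (illustrative only, independent of this framework's
# wrapper; the toy model and loop are assumptions, not code from this repo):
def _swa_sketch():
    import torch
    from torch.optim.swa_utils import AveragedModel

    net = torch.nn.Linear(10, 2)
    swa_net = AveragedModel(net)  # keeps a running average of net's weights
    opt = torch.optim.SGD(net.parameters(), lr=1e-3, momentum=0.9)

    for _ in range(100):
        x = torch.randn(8, 10)
        loss = net(x).pow(2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
        swa_net.update_parameters(net)  # fold current weights into the average
    return swa_net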
def test_enterExit(self):
    ut.testCmpPandas(sf.test_mode.isActive(), "test_mode_plain", False)
    with sf.test_mode():
        ut.testCmpPandas(sf.test_mode.isActive(), "test_mode_on", True)
    ut.testCmpPandas(sf.test_mode.isActive(), "test_mode_turn_off", False)
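# The enter/exit behaviour asserted above can be pictured with a minimal,
# flag-based context manager. This is an illustrative sketch, not the
# framework's actual sf.test_mode implementation; the class name and the
# _active attribute are made up for the example:
class _TestModeSketch:
    _active = False  # process-wide flag, shared through the class

    @classmethod
    def isActive(cls):
        return cls._active

    def __enter__(self):
        type(self)._active = True
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        type(self)._active = False  # reset even if the body raised
        return False  # do not swallow exceptions

# Usage mirrors the three assertions in test_enterExit:
#   _TestModeSketch.isActive() is False before the block,
#   True inside `with _TestModeSketch():`, and False again afterwards.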
import torch.nn.functional as F
import torchvision.models as models
import torchvision.models.resnet as modResnet
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

from framework import smoothingFramework as sf
from framework import defaultClasses as dc

# Modeled on the paper: https://paperswithcode.com/paper/wide-residual-networks
# Model modeled on the resnet18 from https://github.com/huyvnphan/PyTorch_CIFAR10/blob/master/module.py

if __name__ == '__main__':
    modelDevice = 'cuda:0'
    if sf.test_mode.isActive():
        modelDevice = "cuda:0"

    metadata = sf.Metadata(testFlag=True, trainFlag=True, debugInfo=True)
    dataMetadata = dc.DefaultData_Metadata(
        pin_memoryTest=False, pin_memoryTrain=False, epoch=100,
        fromGrayToRGB=False, batchTrainSize=125, batchTestSize=125,
        startTestAtEpoch=[0, 24, 44, 74, 99])
    optimizerDataDict = {"learning_rate": 0.1, "momentum": 0.9,
                         "weight_decay": 0.001}
    modelMetadata = dc.DefaultModel_Metadata(
        device=modelDevice, lossFuncDataDict={},
        optimizerDataDict=optimizerDataDict)

    loop = 5
    modelName = "wide_resnet"
    prefix = "set_copyOfExper_"
    runningAvgSize = 10
    num_classes = 10
    layers = [2, 2, 2, 2]
    block = modResnet.BasicBlock
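    # The excerpt ends here, before the model is assembled. A plausible
    # continuation, assuming the network is built through torchvision's
    # ResNet constructor with the block/layers prepared above (the original
    # script's exact construction is not shown in this excerpt):
    obj = modResnet.ResNet(block, layers, num_classes=num_classes)
    model = dc.DefaultModelPredef(obj=obj, modelMetadata=modelMetadata,
                                  name=modelName)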