def test__smoothingGoodEnoughCheck(self):
    """Exercise _smoothingGoodEnoughCheck with successive |a-b| deltas.

    Presumably the checker needs to see an in-range delta twice in a row
    before agreeing (first call False, identical second call True), and a
    delta above test_weightsEpsilon resets the verdict — TODO confirm
    against the implementation.
    """
    meta = dc.Test_DefaultSmoothingOscilationWeightedMean_Metadata(
        test_softMarginAdditionalLoops=0,
        test_weightsEpsilon=1.0,
        test_smoothingEndCheckType='wgsum')
    smoothing = dc.DefaultSmoothingOscilationWeightedMean(smoothingMetadata=meta)
    smoothing.__setDictionary__(
        smoothingMetadata=meta,
        dictionary=self.model.getNNModelModule().named_parameters())
    smoothing.countWeights = 1

    first = 2.0
    second = 1.5
    smallDelta = abs(first - second)

    # Same in-range delta twice: first check is False, repeat is True.
    ut.testCmpPandas(
        smoothing._smoothingGoodEnoughCheck(smallDelta, smoothingMetadata=meta),
        'bool', False)
    ut.testCmpPandas(
        smoothing._smoothingGoodEnoughCheck(smallDelta, smoothingMetadata=meta),
        'bool', True)

    # A delta above test_weightsEpsilon flips the verdict back to False.
    second = 3.1
    ut.testCmpPandas(
        smoothing._smoothingGoodEnoughCheck(abs(first - second),
                                            smoothingMetadata=meta),
        'bool', False)
def test__sumWeightsToArrayStd(self):
    """Check _sumWeightsToArrayStd over the recorded weight-sum history.

    With a single recorded sample there is no spread, so the method must
    return the dc.ConfigClass.STD_NAN sentinel; after the model weights
    change and a second sample is recorded, it must return the std of the
    two per-sample weight-sum deltas.

    Fix vs. original: removed the `second_weights` dict literal, which was
    defined but never used in this test (dead code copied from
    test_calcMean).
    """
    meta = dc.Test_DefaultSmoothingOscilationWeightedMean_Metadata(
        test_weightIter=dc.DefaultWeightDecay(2),
        test_smoothingEndCheckType='std',
        test_epsilon=1.0,
        test_weightsEpsilon=1.0,
        test_hardEpsilon=1e-9,
        test_softMarginAdditionalLoops=0,
        test_lossContainer=3,
        test_lossContainerDelayedStartAt=1,
        test_weightsArraySize=2)
    smoothing = dc.DefaultSmoothingOscilationWeightedMean(smoothingMetadata=meta)
    smoothing.__setDictionary__(
        smoothingMetadata=meta,
        dictionary=self.model.getNNModelModule().named_parameters())

    self.helperEpoch.epochNumber = 3
    self.dataMetadata.epoch = 4
    self.helper.loss = torch.Tensor([1.0])

    # First pass: only one weight-sum sample exists -> NaN sentinel.
    smoothing(helperEpoch=self.helperEpoch, helper=self.helper,
              model=self.model, dataMetadata=self.dataMetadata,
              modelMetadata=None, metadata=self.metadata,
              smoothingMetadata=meta)
    wg = smoothing.__getSmoothedWeights__(smoothingMetadata=meta,
                                          metadata=self.metadata)
    std = smoothing._sumWeightsToArrayStd(wg)
    ut.testCmpPandas(std.item(), 'std', dc.ConfigClass.STD_NAN)

    # Change the model weights and record a second sample.
    self.model.setConstWeights(weight=11, bias=13)
    smoothing(helperEpoch=self.helperEpoch, helper=self.helper,
              model=self.model, dataMetadata=self.dataMetadata,
              modelMetadata=None, metadata=self.metadata,
              smoothingMetadata=meta)
    wg = smoothing.__getSmoothedWeights__(smoothingMetadata=meta,
                                          metadata=self.metadata)
    std = smoothing._sumWeightsToArrayStd(wg)
    # Expected std of the two weight-sum deltas; the toy model has
    # 6 weight entries and 4 bias entries (see init_weights elsewhere
    # in this file).
    ut.testCmpPandas(
        std.item(), 'std',
        torch.std(torch.Tensor([
            (11 - 9.0) * 6 + (13 - 11.0) * 4,
            (9.0 - 5) * 6 + (11.0 - 7) * 4
        ])).item())  # smoothed weights = saved weights -> 0
def test_calcMean(self):
    """calcMean should snapshot the current model weights into the
    smoothing array without modifying the model; __getSmoothedWeights__
    then yields the mean of the stored snapshots once countWeights is set.
    """
    meta = dc.Test_DefaultSmoothingOscilationWeightedMean_Metadata(
        test_weightIter=dc.DefaultWeightDecay(2),
        test_smoothingEndCheckType='wgsum')
    smoothing = dc.DefaultSmoothingOscilationWeightedMean(smoothingMetadata=meta)
    smoothing.__setDictionary__(
        smoothingMetadata=meta,
        dictionary=self.model.getNNModelModule().named_parameters())

    modelWeights = dict(self.model.named_parameters())
    self.compareDictToNumpy(iterator=modelWeights, numpyDict=init_weights)

    # First snapshot: both the model and the stored copy equal init_weights.
    smoothing.calcMean(model=self.model, smoothingMetadata=meta)
    snapshots = smoothing.weightsArray.array
    modelWeights = dict(self.model.named_parameters())
    firstSnapshot = snapshots[0]
    self.compareDictToNumpy(iterator=modelWeights, numpyDict=init_weights)
    self.compareDictToNumpy(iterator=firstSnapshot, numpyDict=init_weights)

    #########
    secondExpected = {
        'linear1.weight': [[11., 11., 11.]],
        'linear1.bias': [13.],
        'linear2.weight': [[11.], [11.], [11.]],
        'linear2.bias': [13., 13., 13.]
    }
    self.model.setConstWeights(weight=11, bias=13)  # change model weights
    smoothing.calcMean(model=self.model, smoothingMetadata=meta)
    modelWeights = dict(self.model.named_parameters())
    secondSnapshot = snapshots[1]
    self.compareDictToNumpy(iterator=secondSnapshot, numpyDict=secondExpected)
    self.compareDictToNumpy(iterator=modelWeights, numpyDict=secondExpected)

    ########
    # Mean of the two snapshots; the model itself must stay unchanged.
    thirdExpected = {
        'linear1.weight': [[9., 9., 9.]],
        'linear1.bias': [11.],
        'linear2.weight': [[9.], [9.], [9.]],
        'linear2.bias': [11., 11., 11.]
    }
    smoothing.countWeights = 2
    smoothedWeights = smoothing.__getSmoothedWeights__(
        smoothingMetadata=meta, metadata=None)
    modelWeights = dict(self.model.named_parameters())
    self.compareDictToNumpy(iterator=smoothedWeights, numpyDict=thirdExpected)
    self.compareDictToNumpy(iterator=modelWeights, numpyDict=secondExpected)
def test_experiment_weightedMean_MNIST_predefModel_alexnet(self):
    """End-to-end smoke run: weighted-mean smoothing with a predefined
    AlexNet on MNIST, executed under sf.test_mode()."""
    with sf.test_mode():
        modelName = "alexnet"
        metadata = sf.Metadata(testFlag=True, trainFlag=True, debugInfo=True)
        dataMetadata = dc.DefaultData_Metadata(
            pin_memoryTest=True, pin_memoryTrain=True, epoch=1,
            test_howOftenPrintTrain=2, howOftenPrintTrain=3,
            resizeTo=Test_RunExperiment.MNIST_RESIZE)
        optimizerDataDict = {"learning_rate": 1e-3, "momentum": 0.9}
        rawModel = models.alexnet()
        smoothingMetadata = dc.Test_DefaultSmoothingOscilationWeightedMean_Metadata(
            test_weightIter=dc.DefaultWeightDecay(1.05), test_device='cpu',
            test_epsilon=1e-5, test_hardEpsilon=1e-7,
            test_weightsEpsilon=1e-6,
            test_weightSumContainerSize=3,
            test_weightSumContainerSizeStartAt=1,
            test_lossContainer=20, test_lossContainerDelayedStartAt=10)
        # NOTE(review): the model is placed on 'cuda:0' while smoothing runs
        # on 'cpu' — looks intentional, but confirm this test has GPU access.
        modelMetadata = dc.DefaultModel_Metadata(
            lossFuncDataDict={}, optimizerDataDict=optimizerDataDict,
            device='cuda:0')
        data = dc.DefaultDataMNIST(dataMetadata)
        smoothing = dc.DefaultSmoothingOscilationWeightedMean(smoothingMetadata)
        model = dc.DefaultModelPredef(obj=rawModel, modelMetadata=modelMetadata,
                                      name=modelName)
        optimizer = optim.SGD(model.getNNModelModule().parameters(),
                              lr=optimizerDataDict['learning_rate'],
                              momentum=optimizerDataDict['momentum'])
        loss_fn = nn.CrossEntropyLoss()
        stat = dc.run(metadataObj=metadata, data=data, model=model,
                      smoothing=smoothing, optimizer=optimizer,
                      lossFunc=loss_fn, modelMetadata=modelMetadata,
                      dataMetadata=dataMetadata,
                      smoothingMetadata=smoothingMetadata)
def test__sumAllWeights(self):
    """After one stored snapshot, _sumAllWeights must report 58.0 for the
    initial toy-model weights."""
    meta = dc.Test_DefaultSmoothingOscilationWeightedMean_Metadata(
        test_smoothingEndCheckType='wgsum')
    smoothing = dc.DefaultSmoothingOscilationWeightedMean(smoothingMetadata=meta)
    smoothing.__setDictionary__(
        smoothingMetadata=meta,
        dictionary=self.model.getNNModelModule().named_parameters())

    smoothing.countWeights = 1
    smoothing.calcMean(model=self.model, smoothingMetadata=meta)

    totalWeightSum = smoothing._sumAllWeights(smoothingMetadata=meta,
                                              metadata=self.metadata)
    ut.testCmpPandas(totalWeightSum, 'weight_sum', 58.0)
def test__getSmoothedWeights__(self):
    """__getSmoothedWeights__ returns {} until enough snapshots exist, then
    the running weighted mean (decay 2: new + old/2, normalized by 1.5).
    """
    meta = dc.Test_DefaultSmoothingOscilationWeightedMean_Metadata(
        test_weightIter=dc.DefaultWeightDecay(2),
        test_smoothingEndCheckType='wgsum', test_epsilon=1.0,
        test_weightsEpsilon=1.0, test_hardEpsilon=1e-9,
        test_softMarginAdditionalLoops=0, test_lossContainer=3,
        test_lossContainerDelayedStartAt=1, test_weightsArraySize=2)
    self.helperEpoch.maxTrainTotalNumber = 1000
    smoothing = dc.DefaultSmoothingOscilationWeightedMean(smoothingMetadata=meta)
    smoothing.__setDictionary__(
        smoothingMetadata=meta,
        dictionary=self.model.getNNModelModule().named_parameters())
    self.helper.loss = torch.Tensor([1.0])

    # The same keyword sets are reused for every call below.
    callKwargs = dict(helperEpoch=self.helperEpoch, helper=self.helper,
                      model=self.model, dataMetadata=self.dataMetadata,
                      modelMetadata=None, metadata=self.metadata,
                      smoothingMetadata=meta)
    checkKwargs = dict(smoothing=smoothing, helperEpoch=self.helperEpoch,
                       dataMetadata=self.dataMetadata, smoothingMetadata=meta,
                       helper=self.helper, model=self.model,
                       metadata=self.metadata)

    # Not enough stored weights yet -> empty dict.
    smoothing(**callKwargs)
    self.compareDictToNumpy(
        smoothing.__getSmoothedWeights__(smoothingMetadata=meta,
                                         metadata=self.metadata),
        {})

    smoothing(**callKwargs)  # to save the weights
    self.compareDictToNumpy(
        smoothing.__getSmoothedWeights__(smoothingMetadata=meta,
                                         metadata=self.metadata),
        init_weights)

    self.model.setConstWeights(weight=17, bias=19)
    self.checkSmoothedWeights(w=(17 + 5 / 2) / 1.5, b=(19 + 7 / 2) / 1.5,
                              **checkKwargs)

    self.model.setConstWeights(weight=23, bias=27)
    self.checkSmoothedWeights(w=(23 + 17 / 2) / 1.5, b=(27 + 19 / 2) / 1.5,
                              **checkKwargs)

    self.model.setConstWeights(weight=31, bias=37)
    self.checkSmoothedWeights(w=(31 + 23 / 2) / 1.5, b=(37 + 27 / 2) / 1.5,
                              **checkKwargs)
def test__isSmoothingGoodEnough__(self):
    """Feed a loss sequence and verify the oscillation check only reports
    success once the running loss averages converge within
    test_weightsEpsilon (last call -> True)."""
    self.smoothingMetadata = dc.Test_DefaultSmoothingOscilationWeightedMean_Metadata(
        test_epsilon=1.0, test_hardEpsilon=1e-9,
        test_smoothingEndCheckType='wgsum', test_weightsEpsilon=2.0,
        test_softMarginAdditionalLoops=0, test_lossContainer=3,
        test_lossContainerDelayedStartAt=1, test_weightsArraySize=3)
    smoothing = dc.DefaultSmoothingOscilationWeightedMean(
        smoothingMetadata=self.smoothingMetadata)
    smoothing.__setDictionary__(
        smoothingMetadata=self.smoothingMetadata,
        dictionary=self.model.getNNModelModule().named_parameters())

    # Keyword arguments shared by every oscillation check below.
    shared = dict(helperEpoch=self.helperEpoch,
                  dataMetadata=self.dataMetadata, smoothing=smoothing,
                  smoothingMetadata=self.smoothingMetadata,
                  helper=self.helper, model=self.model,
                  metadata=self.metadata)

    self.helper.loss = torch.Tensor([1.0])
    self.checkOscilation__isSmoothingGoodEnough__(
        avgLoss=1.0, avgKLoss=0, booleanIsGood=False, **shared)

    self.helper.loss = torch.Tensor([0.5])
    self.checkOscilation__isSmoothingGoodEnough__(
        avgLoss=1.5 / 2, avgKLoss=1.0, booleanIsGood=False, **shared)
    self.checkOscilation__isSmoothingGoodEnough__(
        avgLoss=2 / 3, avgKLoss=1.5 / 2, booleanIsGood=False, **shared)

    self.helper.loss = torch.Tensor([1.5])
    self.checkOscilation__isSmoothingGoodEnough__(
        avgLoss=2.5 / 3, avgKLoss=1 / 2, booleanIsGood=False, **shared)
    self.checkOscilation__isSmoothingGoodEnough__(
        avgLoss=3.5 / 3, avgKLoss=2 / 2, booleanIsGood=False, **shared)
    self.checkOscilation__isSmoothingGoodEnough__(
        avgLoss=4.5 / 3, avgKLoss=3 / 2, booleanIsGood=False, **shared)

    self.helper.loss = torch.Tensor([1.3])
    self.checkOscilation__isSmoothingGoodEnough__(
        avgLoss=4.3 / 3, avgKLoss=3 / 2, booleanIsGood=False, **shared)
    # Averages are now close enough -> smoothing is declared good.
    self.checkOscilation__isSmoothingGoodEnough__(
        avgLoss=4.1 / 3, avgKLoss=2.8 / 2, booleanIsGood=True, **shared)