@patch('torch.optim.lr_scheduler.ExponentialLR')  # patch target inferred: the callback wraps the torch scheduler
def test_exponential_lr(self, lr_mock):
    state = {torchbearer.OPTIMIZER: 'optimizer'}

    scheduler = ExponentialLR(0.4, last_epoch=-4, step_on_batch='batch')
    scheduler.on_start(state)

    lr_mock.assert_called_once_with('optimizer', 0.4, last_epoch=-4)
    self.assertEqual(scheduler._step_on_batch, 'batch')
@patch('torch.optim.lr_scheduler.ExponentialLR')  # patch target inferred: the callback wraps the torch scheduler
def test_exponential_lr_kwargs(self, lr_mock):
    state = {torchbearer.OPTIMIZER: 'optimizer', torchbearer.EPOCH: 0}

    scheduler = ExponentialLR(gamma=0.4, step_on_batch=True)
    scheduler.on_start(state)

    lr_mock.assert_called_once_with('optimizer', gamma=0.4, last_epoch=-1)
    self.assertTrue(scheduler._step_on_batch)
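As the tests show, the callback defers construction of torch.optim.lr_scheduler.ExponentialLR until on_start, pulling the optimizer from state, and then steps it once per epoch or, with step_on_batch=True, once per batch. A minimal usage sketch follows; the linear model, random data and MSE loss are placeholders for illustration, not part of the tests:

import torch
import torch.nn as nn
import torch.optim as optim
from torchbearer import Trial
from torchbearer.callbacks import ExponentialLR

# Placeholder model and data, purely for illustration
model = nn.Linear(2, 1)
X, Y = torch.randn(64, 2), torch.randn(64, 1)

trial = Trial(model, optim.SGD(model.parameters(), lr=0.1), nn.MSELoss(), metrics=['loss'],
              callbacks=[ExponentialLR(0.99, step_on_batch=True)])  # decay the lr after every batch
trial.with_train_data(X, Y, batch_size=16)
trial.run(epochs=5)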
@torchbearer.callbacks.on_step_training
def draw_margin(state):
    if state[torchbearer.BATCH] % 10 == 0:
        w = state[torchbearer.MODEL].w[0].detach().to('cpu').numpy()
        b = state[torchbearer.MODEL].b[0].detach().to('cpu').numpy()

        # Bucket the decision function into the four margin regions
        z = (w.dot(xy) + b).reshape(x.shape)
        z[np.where(z > 1.)] = 4
        z[np.where((z > 0.) & (z <= 1.))] = 3
        z[np.where((z > -1.) & (z <= 0.))] = 2
        z[np.where(z <= -1.)] = 1

        # Remove the previous contour, if any, before drawing the new one
        if CONTOUR in state:
            for coll in state[CONTOUR].collections:
                coll.remove()
        state[CONTOUR] = plt.contourf(x, y, z, cmap=plt.cm.jet, alpha=0.5)
        plt.tight_layout()
        plt.show()
        mypause(0.001)


svm = LinearSVM()
model = Trial(svm, optim.SGD(svm.parameters(), 0.1), hinge_loss, ['loss'],
              callbacks=[scatter, draw_margin, ExponentialLR(0.999, step_on_batch=True),
                         L2WeightDecay(0.01, params=[svm.w])]).to('cuda')
model.with_train_data(X, Y, batch_size=32)
model.run(epochs=50, verbose=1)

plt.ioff()
plt.show()
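The script assumes LinearSVM, hinge_loss, the scatter callback and the plotting grid (x, y, xy) are defined earlier. As a rough sketch of what a compatible model and loss could look like (the shapes, initialisation and {-1, +1} label convention here are assumptions, not the document's own definitions):

import torch
import torch.nn as nn

class LinearSVM(nn.Module):
    """Linear SVM as a 2D linear decision function h(x) = w.x + b."""
    def __init__(self):
        super().__init__()
        self.w = nn.Parameter(torch.randn(1, 2), requires_grad=True)
        self.b = nn.Parameter(torch.randn(1), requires_grad=True)

    def forward(self, x):
        return x.matmul(self.w.t()) + self.b

def hinge_loss(y_pred, y_true):
    # Hinge loss: mean(max(0, 1 - y * h(x))), assuming labels in {-1, +1}
    return torch.mean(torch.clamp(1 - y_pred.t() * y_true, min=0))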