if __name__ == '__main__':
    from torch import nn

    from learn_pytorch.OptimizationAlgorithm.traner import train_concise
    from learn_pytorch.OptimizationAlgorithm.net import LinearReg
    from utility.load_airfoil_self_noise import load_airfoil_self_noise

    # Demo: fit a 5-in/1-out linear regressor on the airfoil self-noise
    # dataset using torch.optim.RMSprop through the concise training helper.
    lr = 0.01
    batch_size = 10
    num_epoch = 2

    features, labels = load_airfoil_self_noise(
        '../../data/airfoil_self_noise.dat')
    net = LinearReg(5, 1)
    # 'alpha' is RMSprop's smoothing constant for the squared-gradient EMA.
    train_concise('RMSprop', {
        'lr': lr,
        'alpha': 0.99
    }, net, nn.MSELoss(), features, labels, batch_size, num_epoch)
# NOTE(review): this line is a whitespace-mangled paste and is NOT valid
# Python as written. It contains two pieces jammed together:
#   (1) the TAIL of an Adam parameter-update function: the bias-corrected
#       second moment s_hat = s / (1 - beta2 ** t), the in-place update
#       p.data -= (v_hat * lr) / (torch.sqrt(s_hat) + err), and the step
#       counter increment states['t'] = t + 1. The function header — where
#       beta2, t, lr, err, v_hat and the loop over params are defined — was
#       lost in the paste, so the function cannot be reconstructed safely
#       from this view; recover the original from version control.
#   (2) a __main__ demo that loads the airfoil dataset, builds
#       LinearReg(5, 1), initializes optimizer state via init_states(params)
#       (not visible in this file), and calls train(Adam, states, ...).
# Presumably `states` is a dict keyed at least by 't' (the Adam time step)
# — TODO confirm against init_states once recovered.
s_hat = s / (1 - beta2 ** t) p.data -= (v_hat * lr) / (torch.sqrt(s_hat) + err) states['t'] = t + 1 if '__main__' == __name__: from learn_pytorch.OptimizationAlgorithm.loss_func import sqrt_loss from learn_pytorch.OptimizationAlgorithm.traner import train from learn_pytorch.OptimizationAlgorithm.net import LinearReg from utility.load_airfoil_self_noise import load_airfoil_self_noise features, labels = load_airfoil_self_noise('../../data/airfoil_self_noise.dat') batch_size = 10 lr = 0.01 num_epoch = 2 net = LinearReg(5, 1) params = net.parameters() states = init_states(params) train(Adam, states, {'lr': lr}, net, sqrt_loss, features, labels, batch_size, num_epoch )
def sgd(params, states, hyperparams):
    """Plain minibatch SGD update: ``p <- p - lr * p.grad`` in place.

    Args:
        params: iterable of tensors whose ``.grad`` has been populated.
        states: unused; kept so the signature matches the other optimizers
            accepted by ``train`` (e.g. stateful ones like momentum/Adam).
        hyperparams: dict holding the learning rate under key ``'lr'``.
    """
    lr = hyperparams['lr']  # hoist the dict lookup out of the loop
    for param in params:
        param.data -= lr * param.grad.data


if __name__ == '__main__':
    from learn_pytorch.OptimizationAlgorithm.loss_func import sqrt_loss
    from learn_pytorch.OptimizationAlgorithm.traner import train
    from learn_pytorch.OptimizationAlgorithm.net import LinearReg
    from utility.load_airfoil_self_noise import load_airfoil_self_noise

    # Demo: train on the airfoil dataset with true SGD (batch_size=1).
    features, labels = load_airfoil_self_noise(
        '../../data/airfoil_self_noise.dat')
    batch_size = 1
    lr = 0.005
    num_epoch = 2
    train(sgd, None, {'lr': lr}, LinearReg(5, 1), sqrt_loss, features,
          labels, batch_size, num_epoch)