Example #1
        # bias-corrected second moment estimate, then the Adam parameter step
        s_hat = s / (1 - beta2 ** t)
        p.data -= (v_hat * lr) / (torch.sqrt(s_hat) + err)
    # advance the shared time step used for bias correction
    states['t'] = t + 1

if '__main__' == __name__:
    from learn_pytorch.OptimizationAlgorithm.loss_func import sqrt_loss
    from learn_pytorch.OptimizationAlgorithm.traner import train
    from learn_pytorch.OptimizationAlgorithm.net import LinearReg
    from utility.load_airfoil_self_noise import load_airfoil_self_noise

    features, labels = load_airfoil_self_noise('../../data/airfoil_self_noise.dat')
    batch_size = 10
    lr = 0.01
    num_epoch = 2

    net = LinearReg(5, 1)
    params = net.parameters()

    states = init_states(params)

    train(Adam,
          states,
          {'lr': lr},
          net,
          sqrt_loss,
          features,
          labels,
          batch_size,
          num_epoch
          )
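The listing above begins partway through the Adam update, and the `init_states` it relies on is not shown. Below is a minimal sketch of what the missing head could look like, assuming per-parameter first/second-moment buffers kept in the `states` dict alongside the `t` counter, and the common defaults beta1 = 0.9, beta2 = 0.999; only the two lines of the update tail and the `t` bookkeeping come from the original.

import torch

def init_states(params):
    # assumed layout: one (v, s) pair of zero buffers per parameter, plus a
    # shared step counter starting at 1 so the bias correction never divides by zero
    return {'vs': [(torch.zeros_like(p.data), torch.zeros_like(p.data)) for p in params],
            't': 1}

def Adam(params, states, hyperparams):
    lr = hyperparams['lr']
    beta1, beta2 = 0.9, 0.999   # assumed defaults, not taken from the original
    err = 1e-8                  # numerical-stability term
    t = states['t']
    for p, (v, s) in zip(params, states['vs']):
        # exponentially weighted first and second moments of the gradient
        v.data = beta1 * v.data + (1 - beta1) * p.grad.data
        s.data = beta2 * s.data + (1 - beta2) * p.grad.data ** 2
        # bias correction, then the parameter step (as in the fragment above)
        v_hat = v / (1 - beta1 ** t)
        s_hat = s / (1 - beta2 ** t)
        p.data -= (v_hat * lr) / (torch.sqrt(s_hat) + err)
    states['t'] = t + 1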
Example #2
def sgd_momentum(params, states, hyperparams):
    momentum = hyperparams['momentum']
    lr = hyperparams['lr']
    for p, v in zip(params, states):
        # heavy-ball step: accumulate the velocity, then move the parameter by it
        v.data = momentum * v.data + lr * p.grad.data
        p.data -= v.data


if '__main__' == __name__:
    from learn_pytorch.OptimizationAlgorithm.loss_func import sqrt_loss
    from learn_pytorch.OptimizationAlgorithm.traner import train
    from learn_pytorch.OptimizationAlgorithm.net import LinearReg
    from utility.load_airfoil_self_noise import load_airfoil_self_noise

    features, labels = load_airfoil_self_noise(
        '../../data/airfoil_self_noise.dat')
    batch_size = 10
    lr = 0.02
    num_epoch = 2

    net = LinearReg(5, 1)
    params = net.parameters()

    states = init_states(len(list(params)))

    train(sgd_momentum, states, {
        'lr': lr,
        'momentum': 0.5
    }, net, sqrt_loss, features, labels, batch_size, num_epoch)
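`init_states` is not shown for this example either; since `sgd_momentum` zips the states directly against the parameters, one zero velocity buffer per parameter is the natural layout. The sketch below assumes the helper receives the parameters themselves (the call above passes a count instead, so the real helper must obtain the buffer shapes some other way):

import torch

def init_states(params):
    # assumed layout: one zero velocity buffer per parameter, matching its shape
    return [torch.zeros_like(p.data) for p in params]

With that layout, each parameter is paired with its own velocity, and the loop implements the classic heavy-ball update v <- momentum * v + lr * grad followed by p <- p - v.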
Example #3
def sgd(params, states, hyperparams):
    for param in params:
        param.data -= hyperparams['lr'] * param.grad.data

if '__main__' == __name__:
    from learn_pytorch.OptimizationAlgorithm.loss_func import sqrt_loss
    from learn_pytorch.OptimizationAlgorithm.traner import train
    from learn_pytorch.OptimizationAlgorithm.net import LinearReg
    from utility.load_airfoil_self_noise import load_airfoil_self_noise

    features, labels = load_airfoil_self_noise('../../data/airfoil_self_noise.dat')
    batch_size = 1
    lr = 0.005
    num_epoch = 2
    train(sgd,
          None,
          {'lr': lr},
          LinearReg(5, 1),
          sqrt_loss,
          features,
          labels,
          batch_size,
          num_epoch
          )
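Every example hands `train` (imported from `traner.py`, which is not shown) an update function with the signature `(params, states, hyperparams)`. The sketch below shows the assumed contract only; the `DataLoader` usage, the scalar loss, the tensor-valued `features`/`labels`, and the trailing comparison argument (e.g. `'RMSprop'` in a later example) are all assumptions about the real trainer.

import torch
from torch.utils.data import DataLoader, TensorDataset

def train(optimizer_fn, states, hyperparams, net, loss_fn,
          features, labels, batch_size, num_epochs, compare_with=None):
    # compare_with presumably names a torch.optim baseline in the real trainer
    data_iter = DataLoader(TensorDataset(features, labels),
                           batch_size=batch_size, shuffle=True)
    for epoch in range(num_epochs):
        for X, y in data_iter:
            loss = loss_fn(net(X), y)            # assumes a scalar loss tensor
            # clear the gradients left over from the previous step
            for p in net.parameters():
                if p.grad is not None:
                    p.grad.data.zero_()
            loss.backward()
            # hand the fresh gradients to the update rule under test
            optimizer_fn(list(net.parameters()), states, hyperparams)
        print(f'epoch {epoch + 1}, loss {loss.item():.6f}')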
Example #4
import torch

def RMSProp(params, states, hyperparams):
    lr = hyperparams['lr']
    alpha = hyperparams['alpha']
    err = 1e-6
    for p, s in zip(params, states):
        # exponentially weighted moving average of the squared gradient
        s.data = alpha * s.data + (1 - alpha) * p.grad.data ** 2
        # scale the step by the root of the running average (err keeps it finite)
        p.data -= (p.grad.data * lr) / torch.sqrt(s + err)


if '__main__' == __name__:
    from learn_pytorch.OptimizationAlgorithm.loss_func import sqrt_loss
    from learn_pytorch.OptimizationAlgorithm.traner import train
    from learn_pytorch.OptimizationAlgorithm.net import LinearReg
    from utility.load_airfoil_self_noise import load_airfoil_self_noise

    features, labels = load_airfoil_self_noise(
        '../../data/airfoil_self_noise.dat')
    batch_size = 10
    lr = 0.01
    num_epoch = 2

    net = LinearReg(5, 1)
    params = net.parameters()

    states = init_states(params)

    train(RMSProp, states, {
        'lr': lr,
        'alpha': 0.5
    }, net, sqrt_loss, features, labels, batch_size, num_epoch, 'RMSprop')
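For reference, the hand-written update above corresponds roughly to PyTorch's built-in optimizer; the trailing `'RMSprop'` argument to `train` presumably selects such a baseline for comparison, though that is an assumption since the trainer is not shown.

import torch

# rough torch.optim counterpart of the hand-written RMSProp above;
# alpha is the same smoothing constant, while eps plays the role of err
# (PyTorch adds eps after the square root, the code above adds err before it)
net = torch.nn.Linear(5, 1)          # stand-in for LinearReg(5, 1)
optimizer = torch.optim.RMSprop(net.parameters(), lr=0.01, alpha=0.5, eps=1e-6)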
Example #5
#         g =  p.grad.data * torch.sqrt((delta + eps) / (s + eps))
#         p.data -= g
#         delta[:] = rho * delta + (1 - rho) * g * g

if '__main__' == __name__:
    from learn_pytorch.OptimizationAlgorithm.loss_func import sqrt_loss
    from learn_pytorch.OptimizationAlgorithm.traner import train
    from learn_pytorch.OptimizationAlgorithm.net import LinearReg
    from utility.load_airfoil_self_noise import load_airfoil_self_noise

    features, labels = load_airfoil_self_noise('../../data/airfoil_self_noise.dat')
    batch_size = 10
    lr = 0.1
    num_epoch = 2

    net = LinearReg(5, 1)

    params = net.parameters()

    states = init_states(params)
    # states = init_adadelta_states()
    train(adadelta,
          states,
          {'rho': 0.99},
          net,
          sqrt_loss,
          features,
          labels,
          batch_size,
          num_epoch
          )
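Only the commented-out tail of `adadelta` survives in this listing, and neither the rest of the function nor its state initializer is shown. Below is a minimal sketch consistent with that fragment, assuming one (s, delta) pair of zero accumulators per parameter and a small stability constant (the eps value of 1e-5 is an assumption):

import torch

def init_states(params):
    # assumed layout: one (s, delta) pair of zero accumulators per parameter
    return [(torch.zeros_like(p.data), torch.zeros_like(p.data)) for p in params]

def adadelta(params, states, hyperparams):
    rho, eps = hyperparams['rho'], 1e-5
    for p, (s, delta) in zip(params, states):
        # running average of squared gradients
        s[:] = rho * s + (1 - rho) * p.grad.data ** 2
        # rescale the gradient by the ratio of the two accumulators
        # (these three lines match the commented fragment above)
        g = p.grad.data * torch.sqrt((delta + eps) / (s + eps))
        p.data -= g
        # running average of squared updates
        delta[:] = rho * delta + (1 - rho) * g * g

Note that AdaDelta needs no learning rate, which is why hyperparams only carries 'rho' here; the lr = 0.1 defined above goes unused.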