Esempio n. 1
0
def step(model, lr, x, y):
    """Compute the loss and an SGD-updated copy of the model.

    value_and_grad(cost, 'model') yields cost(model, x, y) together with
    dcost(...)/dmodel; leaving out the argument name would default to
    differentiating wrt the first parameter.
    """
    loss, grads = value_and_grad(cost, 'model')(model, x, y)
    updated_model = model - (lr * grads)
    return loss, updated_model
Esempio n. 2
0
 def step_rstate_and_compute(rstate, x):
     """Compiled myia step returning (rstate, value + gradient).

     NOTE: passing the `dout` argument explicitly appears necessary here
     so that myia propagates the rstate gradient correctly.
     """
     vg = value_and_grad(rstate_and_compute, "x")
     (new_rstate, value), grad = vg(rstate, x, dout=(1, 1))
     return new_rstate, value + grad
Esempio n. 3
0
def step(model, x, y):
    """Returns the loss and parameter gradients.

    value_and_grad will return cost(model, x, y) and dcost(...)/dmodel.
    The 'model' argument can be omitted: by default the derivative wrt
    the first argument is returned.
    """
    _cost, dmodel = value_and_grad(cost, 'model')(model, x, y)
    # Gradient step with an implicit learning rate of 1.
    return _cost, model - dmodel
Esempio n. 4
0
 def step_only_compute(rstate, x):
     """Compiled myia function returning value + gradient wrt x.

     No `dout` is needed here since rstate is not part of the output.
     """
     value, grad = value_and_grad(only_compute, "x")(rstate, x)
     return value + grad
Esempio n. 5
0
 def step(model, inp, target):
     """Return the loss and the model updated through update_sgd."""
     loss, grads = value_and_grad(cost, 'model')(model, inp, target)
     new_model = update(model, grads, update_sgd)
     return loss, new_model
Esempio n. 6
0
 def step(inp):
     """Return the loss and its gradient wrt the input.

     Reads `model` from the enclosing scope rather than taking it as a
     parameter.
     """
     loss, grad_inp = value_and_grad(cost, 'inp')(model, inp)
     return loss, grad_inp
Esempio n. 7
0
 def step(model, inp, target):
     """Return the loss and the model after a unit-step gradient update."""
     loss, grads = value_and_grad(cost, 'model')(model, inp, target)
     updated = model - grads
     return loss, updated
Esempio n. 8
0
def step(model, data, target, optimizer):
    """Return the loss and the model as updated by `optimizer`."""
    cost_value, grads = value_and_grad(cost, 'model')(model, data, target)
    new_model = optimizer(model, grads)
    return cost_value, new_model
Esempio n. 9
0
 def step(model, inp, target):
     """Return the loss together with the gradient wrt the model."""
     loss, grads = value_and_grad(cost, "model")(model, inp, target)
     return loss, grads
Esempio n. 10
0
 def step(_model, inp, w):
     """Return the loss plus gradients wrt the input and the weights."""
     vg = value_and_grad(cost, 'inp', 'w')
     loss, grad_inp, grad_w = vg(_model, inp, w)
     return loss, grad_inp, grad_w
Esempio n. 11
0
 def step(_model, inp, w, b):
     """Return the loss plus gradients wrt input, weights and bias."""
     vg = value_and_grad(cost, 'inp', 'w', 'b')
     loss, grad_inp, grad_w, grad_b = vg(_model, inp, w, b)
     return loss, grad_inp, grad_w, grad_b
Esempio n. 12
0
def step_update(model, x, y, adv):
    """Return the loss and the model after a unit-step gradient update.

    The advantages are standardized (zero mean, scaled by std) before
    being passed to the cost; the 1e-7 term guards against a zero std.
    """
    normalized_adv = (adv - mean(adv)) / (std(adv) + 1e-7)
    loss, grads = value_and_grad(cost, "model")(model, x, y, normalized_adv)
    return loss, model - grads