# Example 1
def test_pick_value_by_action(batch_size, action_size, n_quantiles, keepdims):
    """Check that _pick_value_by_action selects each row's value for its action.

    Covers both the plain (batch, action) table and the quantile
    (batch, action, n_quantiles) table, with and without keepdims.
    """
    # build the value table; n_quantiles == 0 means "no quantile axis"
    value_shape = (batch_size, action_size)
    if n_quantiles != 0:
        value_shape = value_shape + (n_quantiles,)
    values = torch.rand(*value_shape)

    action = torch.randint(action_size, size=(batch_size, ))

    picked = _pick_value_by_action(values, action, keepdims)

    # expected shape: keepdims retains a singleton action axis,
    # and a trailing quantile axis survives when present
    expected_shape = (batch_size, 1) if keepdims else (batch_size, )
    if n_quantiles != 0:
        expected_shape = expected_shape + (n_quantiles,)
    assert picked.shape == expected_shape

    # each row must equal the value stored under that row's action
    flat = picked.view(batch_size, -1)
    for row in range(batch_size):
        assert (flat[row] == values[row][action[row]]).all()
# Example 2
def test_discrete_qr_q_function(feature_size, action_size, n_quantiles,
                                batch_size, gamma):
    """End-to-end checks for DiscreteQRQFunction.

    Verifies the forward-pass shape, the tau midpoints, compute_target with
    and without an explicit action, the quantile huber loss against a NumPy
    reference, and that all parameters receive gradients.
    """
    encoder = DummyEncoder(feature_size)
    q_func = DiscreteQRQFunction(encoder, action_size, n_quantiles)

    # forward pass yields one Q-value per action
    x = torch.rand(batch_size, feature_size)
    y = q_func(x)
    assert y.shape == (batch_size, action_size)

    # taus should be the quantile midpoints: i/N + 1/(2N)
    taus = q_func._make_taus(encoder(x))
    step = 1 / n_quantiles
    for i in range(n_quantiles):
        assert np.allclose(taus[0][i].numpy(), step * i + 0.5 * step)

    # compute_target with an explicit action gives per-sample quantiles
    action = torch.randint(high=action_size, size=(batch_size, ))
    target = q_func.compute_target(x, action)
    assert target.shape == (batch_size, n_quantiles)

    # with action=None the target covers every action
    targets = q_func.compute_target(x)
    assert targets.shape == (batch_size, action_size, n_quantiles)

    # inputs for the quantile huber loss
    obs_t = torch.rand(batch_size, feature_size)
    act_t = torch.randint(action_size, size=(batch_size, ))
    rew_tp1 = torch.rand(batch_size, 1)
    q_tp1 = torch.rand(batch_size, n_quantiles)
    ter_tp1 = torch.randint(2, size=(batch_size, 1))

    # unreduced loss keeps the batch dimension
    loss = q_func.compute_error(obs_t,
                                act_t,
                                rew_tp1,
                                q_tp1,
                                ter_tp1,
                                reduction="none")
    assert loss.shape == (batch_size, 1)

    # reduced (mean) loss, compared against a NumPy reference below
    loss = q_func.compute_error(obs_t, act_t, rew_tp1, q_tp1, ter_tp1)

    # Bellman target with terminal masking
    target = rew_tp1.numpy() + gamma * q_tp1.numpy() * (1 - ter_tp1.numpy())
    quantiles = q_func._compute_quantiles(encoder(obs_t), taus)
    y = _pick_value_by_action(quantiles, act_t)

    ref_loss = ref_quantile_huber_loss(
        np.reshape(y.detach().numpy(), (batch_size, 1, -1)),
        np.reshape(target, (batch_size, -1, 1)),
        np.reshape(taus, (1, 1, -1)),
        n_quantiles,
    )
    assert np.allclose(loss.cpu().detach(), ref_loss.mean())

    # every parameter should be updated by a backward pass
    check_parameter_updates(q_func, (obs_t, act_t, rew_tp1, q_tp1, ter_tp1))
# Example 3
def test_discrete_qr_q_function(feature_size, action_size, n_quantiles,
                                batch_size, gamma):
    """Shape, target-consistency and loss checks for DiscreteQRQFunction.

    This variant of the API exposes quantiles via ``as_quantiles=True`` and
    builds taus through ``_make_taus_prime``; compute_error here takes no
    terminal flags.
    """
    encoder = DummyEncoder(feature_size)
    q_func = DiscreteQRQFunction(encoder, action_size, n_quantiles)

    # forward pass returns per-action Q-values
    x = torch.rand(batch_size, feature_size)
    y = q_func(x)
    assert y.shape == (batch_size, action_size)

    # compute_target must match the quantiles picked for each chosen action
    action = torch.randint(high=action_size, size=(batch_size, ))
    target = q_func.compute_target(x, action)
    quantiles = q_func(x, as_quantiles=True)
    assert target.shape == (batch_size, n_quantiles)
    assert (quantiles[torch.arange(batch_size), action] == target).all()

    # inputs for the quantile huber loss
    obs_t = torch.rand(batch_size, feature_size)
    act_t = torch.randint(action_size, size=(batch_size, ))
    rew_tp1 = torch.rand(batch_size, 1)
    q_tp1 = torch.rand(batch_size, n_quantiles)

    # unreduced loss keeps the batch dimension
    loss = q_func.compute_error(obs_t, act_t, rew_tp1, q_tp1, reduction='none')
    assert loss.shape == (batch_size, 1)

    # reduced (mean) loss, compared against a NumPy reference below
    loss = q_func.compute_error(obs_t, act_t, rew_tp1, q_tp1)

    # Bellman target (no terminal masking in this API version)
    target = rew_tp1.numpy() + gamma * q_tp1.numpy()
    y = _pick_value_by_action(q_func(obs_t, as_quantiles=True), act_t)
    taus = _make_taus_prime(n_quantiles, 'cpu:0').numpy()

    ref_loss = ref_quantile_huber_loss(
        np.reshape(y.detach().numpy(), (batch_size, 1, -1)),
        np.reshape(target, (batch_size, -1, 1)),
        np.reshape(taus, (1, 1, -1)),
        n_quantiles,
    )
    assert np.allclose(loss.cpu().detach(), ref_loss.mean())

    # every parameter should be updated by a backward pass
    check_parameter_updates(q_func, (obs_t, act_t, rew_tp1, q_tp1))