Example #1
    def test_calc_probability_density(self):
        density_val = mut.CalcProbabilityDensity(
            distribution=mut.RandomDistribution.kGaussian,
            x=np.array([0.5, 1.]))
        density_ad = mut.CalcProbabilityDensity(
            distribution=mut.RandomDistribution.kGaussian,
            x=np.array([AutoDiffXd(1), AutoDiffXd(2)]))
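        # Hypothetical cross-check (not in the original source), assuming
        # kGaussian means i.i.d. standard normals, so the joint density is
        # prod_i exp(-x_i ** 2 / 2) / sqrt(2 * pi).
        x = np.array([0.5, 1.])
        expected = np.prod(np.exp(-x ** 2 / 2) / np.sqrt(2 * np.pi))
        np.testing.assert_allclose(density_val, expected)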
Example #2
def CheckProgram(prog):
    """
    Return True if the outputs of all costs and constraints in a
    MathematicalProgram are valid.

    Arguments:
        prog: a MathematicalProgram pydrake object
    """
    status = True
    # Check that the outputs of the costs are all scalars
    for cost in prog.generic_costs():
        # Evaluate the cost with floats
        try:
            xs = [1.] * len(cost.variables())
            cost.evaluator().Eval(xs)
        except RuntimeError as err:
            status = False
            print(
                f"Evaluating {cost.evaluator().get_description()} with floats produces a RuntimeError"
            )
        # Evaluate with AutoDiff arrays
        try:
            xd = [AutoDiffXd(1.)] * len(cost.variables())
            cost.evaluator().Eval(xd)
        except RuntimeError as err:
            status = False
            print(
                f"Evaluating {cost.evaluator().get_description()} with AutoDiffs produces a RuntimeError"
            )
    # Check that the outputs of all constraints are vectors
    for cstr in prog.generic_constraints():
        # Evaluate the constraint with floats
        try:
            xs = [1.] * len(cstr.variables())
            cstr.evaluator().Eval(xs)
        except RuntimeError as err:
            status = False
            print(
                f"Evaluating {cstr.evaluator().get_description()} with floats produces a RuntimeError"
            )
        except ValueError as err:
            status = False
            print(
                f"Evaluating {cstr.evaluator().get_description()} with floats produces a ValueError"
            )
        # Evaluate constraint with AutoDiffXd
        try:
            xd = [AutoDiffXd(1.)] * len(cstr.variables())
            cstr.evaluator().Eval(xd)
        except RuntimeError as err:
            status = False
            print(
                f"Evaluating {cstr.evaluator().get_description()} with AutoDiffs produces a RuntimeError"
            )
        except ValueError as err:
            status = False
            print(
                f"Evaluating {cstr.evaluator().get_description()} with AutoDiffs produces a ValueError"
            )
    # Return the status flag
    return status
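
A minimal usage sketch for CheckProgram (a sketch under assumptions: pydrake is importable, the callback cost below is illustrative rather than from the original source, and the MathematicalProgram import path varies across Drake versions):

import numpy as np
from pydrake.solvers.mathematicalprogram import MathematicalProgram

prog = MathematicalProgram()
x = prog.NewContinuousVariables(2, "x")
# A Python callback cost is registered among prog.generic_costs().
prog.AddCost(lambda z: (z[0] - 1.) ** 2 + z[1] ** 2, vars=x)
assert CheckProgram(prog)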
Example #3
    def test_asserts_autodiff(self):
        # Test only scalar; other cases are handled by above test case.
        a = AutoDiffXd(1., [1., 0.])
        b = AutoDiffXd(1., [0., 1.])
        c = AutoDiffXd(2., [3., 4.])
        numpy_compare.assert_equal(a, a)
        numpy_compare.assert_not_equal(a, b)
        numpy_compare.assert_not_equal(a, c)
Example #4
    def test_eval_binding(self):
        qp = TestQP()
        prog = qp.prog

        x = qp.x
        x_expected = np.array([1., 1.])

        costs = qp.costs
        cost_values_expected = [2., 1.]
        constraints = qp.constraints
        constraint_values_expected = [1., 1., 2., 3.]

        result = mp.Solve(prog)
        self.assertTrue(np.allclose(result.GetSolution(x), x_expected))

        enum = zip(constraints, constraint_values_expected)
        for (constraint, value_expected) in enum:
            value = result.EvalBinding(constraint)
            self.assertTrue(np.allclose(value, value_expected))

        enum = zip(costs, cost_values_expected)
        for (cost, value_expected) in enum:
            value = result.EvalBinding(cost)
            self.assertTrue(np.allclose(value, value_expected))

        self.assertIsInstance(result.EvalBinding(costs[0]), np.ndarray)

        # Bindings for `Eval`.
        x_list = (float(1.), AutoDiffXd(1.), sym.Variable("x"))
        T_y_list = (float, AutoDiffXd, sym.Expression)
        evaluator = costs[0].evaluator()
        for x_i, T_y_i in zip(x_list, T_y_list):
            y_i = evaluator.Eval(x=[x_i, x_i])
            self.assertIsInstance(y_i[0], T_y_i)
Example #5
    def test_constraint_api(self):
        prog = mp.MathematicalProgram()
        x0, = prog.NewContinuousVariables(1, "x")
        c = prog.AddLinearConstraint(x0 >= 2).evaluator()
        ce = prog.AddLinearEqualityConstraint(2 * x0, 1).evaluator()

        self.assertTrue(c.CheckSatisfied([2.], tol=1e-3))
        self.assertFalse(c.CheckSatisfied([AutoDiffXd(1.)]))
        self.assertIsInstance(c.CheckSatisfied([x0]), sym.Formula)

        ce.set_description("my favorite constraint")
        self.assertEqual(ce.get_description(), "my favorite constraint")

        def check_bounds(c, A, lb, ub):
            self.assertTrue(np.allclose(c.A(), A))
            self.assertTrue(np.allclose(c.lower_bound(), lb))
            self.assertTrue(np.allclose(c.upper_bound(), ub))

        check_bounds(c, [1.], [2.], [np.inf])
        c.UpdateLowerBound([3.])
        check_bounds(c, [1.], [3.], [np.inf])
        c.UpdateUpperBound([4.])
        check_bounds(c, [1.], [3.], [4.])
        c.set_bounds([-10.], [10.])
        check_bounds(c, [1.], [-10.], [10.])
        c.UpdateCoefficients([10.], [-20.], [-30.])
        check_bounds(c, [10.], [-20.], [-30.])

        check_bounds(ce, [2.], [1.], [1.])
        ce.UpdateCoefficients([10.], [20.])
        check_bounds(ce, [10.], [20.], [20.])
Example #6
def jacobian2(function, x):
    """
    This is a rewriting of the jacobian function from Drake, which addresses
    a strange bug that prevents computation of Jdot.

    Compute the Jacobian of ``function`` evaluated at the vector input x,
    using Eigen's automatic differentiation. The Jacobian has one more
    dimension than the output of ``function``.

    ``function`` must take a vector input; its output may have any dimension,
    but must be an array with AutoDiffXd elements.
    """
    x = np.asarray(x)
    assert x.ndim == 1, "x must be a vector"
    x_ad = np.empty(x.shape, dtype=object)
    for i in range(x.size):
        der = np.zeros(x.size)
        der[i] = 1
        x_ad.flat[i] = AutoDiffXd(x.flat[i], der)
    y_ad = np.asarray(function(x_ad))

    yds = []
    for y in y_ad.flat:
        yd = y.derivatives()
        if yd.shape == (0, ):
            yd = np.zeros(x_ad.shape)
        yds.append(yd)

    return np.vstack(yds).reshape(y_ad.shape + (-1, ))
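
A usage sketch for jacobian2 (the quadratic test function is illustrative, not from the source):

def f(x):
    return np.array([x[0] * x[1], x[0] ** 2])

J = jacobian2(f, [1., 2.])
# Expected: J == [[2., 1.], [2., 0.]], with shape (2, 2).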
Example #7
    def test_eval_ad_smooth(self):
        dut = al.AugmentedLagrangianSmooth(prog=self.prog,
                                           include_x_bounds=True)
        x_val = InitializeAutoDiff(np.array([1., 3]))
        s_val = np.array([AutoDiffXd(1.)])
        al_val, constraint_residue, cost = dut.Eval(x=x_val,
                                                    s=s_val,
                                                    lambda_val=np.array([0.5]),
                                                    mu=0.1)
        self.assertIsInstance(al_val, AutoDiffXd)
        self.assertIsInstance(constraint_residue, np.ndarray)
        self.assertIsInstance(cost, AutoDiffXd)
Example #8
    def test_eval_binding(self):
        qp = TestQP()
        prog = qp.prog

        x = qp.x
        x_expected = np.array([1., 1.])

        costs = qp.costs
        cost_values_expected = [2., 1.]
        constraints = qp.constraints
        constraint_values_expected = [1., 1., 2., 3.]

        with catch_drake_warnings(action='ignore'):
            prog.Solve()
            self.assertTrue(np.allclose(prog.GetSolution(x), x_expected))

            enum = zip(constraints, constraint_values_expected)
            for (constraint, value_expected) in enum:
                value = prog.EvalBindingAtSolution(constraint)
                self.assertTrue(np.allclose(value, value_expected))

            enum = zip(costs, cost_values_expected)
            for (cost, value_expected) in enum:
                value = prog.EvalBindingAtSolution(cost)
                self.assertTrue(np.allclose(value, value_expected))

            # Existence check for non-autodiff versions.
            self.assertIsInstance(
                prog.EvalBinding(costs[0], x_expected), np.ndarray)
            self.assertIsInstance(
                prog.EvalBindings(prog.GetAllConstraints(), x_expected),
                np.ndarray)

            # Existence check for autodiff versions.
            self.assertIsInstance(
                jacobian(partial(prog.EvalBinding, costs[0]), x_expected),
                np.ndarray)
            self.assertIsInstance(
                jacobian(partial(prog.EvalBindings, prog.GetAllConstraints()),
                         x_expected),
                np.ndarray)

            # Bindings for `Eval`.
            x_list = (float(1.), AutoDiffXd(1.), sym.Variable("x"))
            T_y_list = (float, AutoDiffXd, sym.Expression)
            evaluator = costs[0].evaluator()
            for x_i, T_y_i in zip(x_list, T_y_list):
                y_i = evaluator.Eval(x=[x_i, x_i])
                self.assertIsInstance(y_i[0], T_y_i)
Example #9
    def check_output(context):
        # Check number of output ports and value for a given context.
        output = system.AllocateOutput(context)
        self.assertEqual(output.get_num_ports(), 1)
        system.CalcOutput(context, output)
        if T == float:
            value = output.get_vector_data(0).get_value()
            self.assertTrue(np.allclose([1], value))
        elif T == AutoDiffXd:
            value = output.get_vector_data(0)._get_value_copy()
            # TODO(eric.cousineau): Define `isfinite` ufunc, if
            # possible, to use for `np.allclose`.
            self.assertEqual(value.shape, (1,))
            self.assertEqual(value[0], AutoDiffXd(1.))
        else:
            raise RuntimeError("Bad T: {}".format(T))
Example #10
    def test_kinematics_api(self):
        # TODO(eric.cousineau): Reduce these tests to only test API, and do
        # simple sanity checks on the numbers.
        tree = RigidBodyTree(
            FindResourceOrThrow("drake/examples/pendulum/Pendulum.urdf"))
        num_q = 7
        num_v = 7
        self.assertEqual(tree.number_of_positions(), num_q)
        self.assertEqual(tree.number_of_velocities(), num_v)
        q = np.zeros(num_q)
        v = np.zeros(num_v)

        # Trivial kinematics.
        kinsol = tree.doKinematics(q, v)
        p = tree.transformPoints(kinsol, np.zeros(3), 0, 1)
        self.assertTrue(np.allclose(p, np.zeros(3)))

        # Ensure mismatched sizes throw an error.
        q_bad = np.zeros(num_q + 1)
        v_bad = np.zeros(num_v + 1)
        bad_args_list = (
            (q_bad, ),
            (q_bad, v),
            (q, v_bad),
        )
        for bad_args in bad_args_list:
            with self.assertRaises(SystemExit):
                tree.doKinematics(*bad_args)

        # AutoDiff jacobians.

        def do_transform(q):
            kinsol = tree.doKinematics(q)
            point = np.ones(3)
            return tree.transformPoints(kinsol, point, 2, 0)

        # - Position.
        value = do_transform(q)
        self.assertTrue(np.allclose(value, np.ones(3)))
        # - Gradient.
        g = jacobian(do_transform, q)
        g_expected = np.array([[[1, 0, 0, 0, 1, -1, 1]],
                               [[0, 1, 0, -1, 0, 1, 0]],
                               [[0, 0, 1, 1, -1, 0, -1]]])
        self.assertTrue(np.allclose(g, g_expected))

        # Relative transform.
        q[:] = 0
        q[6] = np.pi / 2
        kinsol = tree.doKinematics(q)
        T = tree.relativeTransform(kinsol, 1, 2)
        T_expected = np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0],
                               [0, 0, 0, 1]])
        self.assertTrue(np.allclose(T, T_expected))

        # Relative RPY, checking autodiff (#9886).
        q[:] = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7]
        q_ad = np.array([AutoDiffXd(x) for x in q])
        world = tree.findFrame("world")
        frame = tree.findFrame("arm_com")
        kinsol = tree.doKinematics(q)
        rpy = tree.relativeRollPitchYaw(
            cache=kinsol,
            from_body_or_frame_ind=world.get_frame_index(),
            to_body_or_frame_ind=frame.get_frame_index())
        kinsol_ad = tree.doKinematics(q_ad)
        rpy_ad = tree.relativeRollPitchYaw(
            cache=kinsol_ad,
            from_body_or_frame_ind=world.get_frame_index(),
            to_body_or_frame_ind=frame.get_frame_index())
        for x, x_ad in zip(rpy, rpy_ad):
            self.assertEqual(x, x_ad.value())

        # Do FK and compare pose of 'arm' with expected pose.
        q[:] = 0
        q[6] = np.pi / 2
        kinsol = tree.doKinematics(q)
        T = tree.CalcBodyPoseInWorldFrame(kinsol, tree.FindBody("arm"))
        T_expected = np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0],
                               [0, 0, 0, 1]])
        self.assertTrue(np.allclose(T, T_expected))

        # Geometric Jacobian.
        # Construct a new tree with a quaternion floating base.
        tree = RigidBodyTree(
            FindResourceOrThrow("drake/examples/pendulum/Pendulum.urdf"),
            floating_base_type=FloatingBaseType.kQuaternion)
        num_q = 8
        num_v = 7
        self.assertEqual(tree.number_of_positions(), num_q)
        self.assertEqual(tree.number_of_velocities(), num_v)

        q = tree.getZeroConfiguration()
        v = np.zeros(num_v)
        kinsol = tree.doKinematics(q, v)
        # - Sanity check sizes.
        J_default, v_indices_default = tree.geometricJacobian(kinsol, 0, 2, 0)
        J_eeDotTimesV = tree.geometricJacobianDotTimesV(kinsol, 0, 2, 0)

        self.assertEqual(J_default.shape[0], 6)
        self.assertEqual(J_default.shape[1], num_v)
        self.assertEqual(len(v_indices_default), num_v)
        self.assertEqual(J_eeDotTimesV.shape[0], 6)

        # - Check QDotToVelocity and VelocityToQDot methods
        q = tree.getZeroConfiguration()
        v_real = np.zeros(num_v)
        q_ad = np.array(list(map(AutoDiffXd, q)))
        v_real_ad = np.array(list(map(AutoDiffXd, v_real)))

        kinsol = tree.doKinematics(q)
        kinsol_ad = tree.doKinematics(q_ad)
        qd = tree.transformVelocityToQDot(kinsol, v_real)
        v = tree.transformQDotToVelocity(kinsol, qd)
        qd_ad = tree.transformVelocityToQDot(kinsol_ad, v_real_ad)
        v_ad = tree.transformQDotToVelocity(kinsol_ad, qd_ad)
        self.assertEqual(qd.shape, (num_q, ))
        self.assertEqual(v.shape, (num_v, ))
        self.assertEqual(qd_ad.shape, (num_q, ))
        self.assertEqual(v_ad.shape, (num_v, ))

        v_to_qdot = tree.GetVelocityToQDotMapping(kinsol)
        qdot_to_v = tree.GetQDotToVelocityMapping(kinsol)
        v_to_qdot_ad = tree.GetVelocityToQDotMapping(kinsol_ad)
        qdot_to_v_ad = tree.GetQDotToVelocityMapping(kinsol_ad)
        self.assertEqual(v_to_qdot.shape, (num_q, num_v))
        self.assertEqual(qdot_to_v.shape, (num_v, num_q))
        self.assertEqual(v_to_qdot_ad.shape, (num_q, num_v))
        self.assertEqual(qdot_to_v_ad.shape, (num_v, num_q))

        v_map = tree.transformVelocityMappingToQDotMapping(
            kinsol, np.eye(num_v))
        qd_map = tree.transformQDotMappingToVelocityMapping(
            kinsol, np.eye(num_q))
        v_map_ad = tree.transformVelocityMappingToQDotMapping(
            kinsol_ad, np.eye(num_v))
        qd_map_ad = tree.transformQDotMappingToVelocityMapping(
            kinsol_ad, np.eye(num_q))
        self.assertEqual(v_map.shape, (num_v, num_q))
        self.assertEqual(qd_map.shape, (num_q, num_v))
        self.assertEqual(v_map_ad.shape, (num_v, num_q))
        self.assertEqual(qd_map_ad.shape, (num_q, num_v))

        # - Check FindBody and FindBodyIndex methods
        body_name = "arm"
        body = tree.FindBody(body_name)
        self.assertEqual(body.get_body_index(), tree.FindBodyIndex(body_name))

        # - Check ChildOfJoint methods
        body = tree.FindChildBodyOfJoint("theta")
        self.assertIsInstance(body, RigidBody)
        self.assertEqual(body.get_name(), "arm")
        self.assertEqual(tree.FindIndexOfChildBodyOfJoint("theta"), 2)

        # - Check that default value for in_terms_of_qdot is false.
        J_not_in_terms_of_q_dot, v_indices_not_in_terms_of_qdot = \
            tree.geometricJacobian(kinsol, 0, 2, 0, False)
        self.assertTrue((J_default == J_not_in_terms_of_q_dot).all())
        self.assertEqual(v_indices_default, v_indices_not_in_terms_of_qdot)

        # - Check with in_terms_of_qdot set to True.
        J_in_terms_of_q_dot, v_indices_in_terms_of_qdot = \
            tree.geometricJacobian(kinsol, 0, 2, 0, True)
        self.assertEqual(J_in_terms_of_q_dot.shape[0], 6)
        self.assertEqual(J_in_terms_of_q_dot.shape[1], num_q)
        self.assertEqual(len(v_indices_in_terms_of_qdot), num_q)

        # - Check transformPointsJacobian methods
        q = tree.getZeroConfiguration()
        v = np.zeros(num_v)
        kinsol = tree.doKinematics(q, v)
        Jv = tree.transformPointsJacobian(cache=kinsol,
                                          points=np.zeros(3),
                                          from_body_or_frame_ind=0,
                                          to_body_or_frame_ind=1,
                                          in_terms_of_qdot=False)
        Jq = tree.transformPointsJacobian(cache=kinsol,
                                          points=np.zeros(3),
                                          from_body_or_frame_ind=0,
                                          to_body_or_frame_ind=1,
                                          in_terms_of_qdot=True)
        JdotV = tree.transformPointsJacobianDotTimesV(cache=kinsol,
                                                      points=np.zeros(3),
                                                      from_body_or_frame_ind=0,
                                                      to_body_or_frame_ind=1)
        self.assertEqual(Jv.shape, (3, num_v))
        self.assertEqual(Jq.shape, (3, num_q))
        self.assertEqual(JdotV.shape, (3, ))

        # - Check that drawKinematicTree runs
        tree.drawKinematicTree(
            join(os.environ["TEST_TMPDIR"], "test_graph.dot"))

        # - Check relative twist method
        twist = tree.relativeTwist(cache=kinsol,
                                   base_or_frame_ind=0,
                                   body_or_frame_ind=1,
                                   expressed_in_body_or_frame_ind=0)
        self.assertEqual(twist.shape[0], 6)
Example #11
def finite_difference_check_autodiffs(autodiff_params=False, debug=False):
    print("autodiff_params: ", autodiff_params)
    NUM_INPUTS = 3
    NUM_OUTPUTS = 3
    NETS_TO_TEST = [
        FC,
        FCBIG,
        MLPSMALL,
        #MLP
    ]

    for kNetConstructor in NETS_TO_TEST:
        print(kNetConstructor.__name__, end='')
        if debug:
            np.random.seed(1)
            torch.manual_seed(1)

        # Make a net and calculate n_inputs and n_outputs
        network = kNetConstructor(n_inputs=NUM_INPUTS, n_outputs=NUM_OUTPUTS)
        param_dims = list(param.size() for param in network.parameters())
        n_params = sum(np.prod(param_dim) for param_dim in param_dims)
        n_inputs = param_dims[0][-1]
        n_outputs = param_dims[-1][-1]
        total_params = n_inputs + n_params if autodiff_params else n_inputs
        param_vec = np.array([])

        def one_hot(i, N):
            ret = np.zeros(N)
            ret[i] = 1
            return ret

        if debug:
            for param in network.parameters():
                torch.nn.init.uniform_(param.data, 1, 1)

        # Make n_inputs AutoDiffXd's with (optionally seeded) random values;
        # each carries a derivatives vector of length total_params with only
        # its own index set (one-hot seeding).
        values = (np.ones(n_inputs) if debug else np.random.randn(n_inputs))
        in_vec = np.array([
            AutoDiffXd(values[i], one_hot(i, total_params))
            for i in range(n_inputs)
        ])
        if autodiff_params:
            values = np.hstack([param.clone().detach().numpy().flatten()
                                for param in network.parameters()])
            param_vec = np.array([
                AutoDiffXd(values[i], one_hot(n_inputs + i, total_params))
                for i in range(n_params)
            ])
            nn_loader(param_vec, network)

        # First, generate all the AutoDiffXd outputs.
        out_vec = NNInferenceHelper_autodiff(network,
                                             in_vec,
                                             param_vec=param_vec,
                                             debug=debug)
        if debug:
            print("param grads: ",
                  [param.grad for param in network.parameters()])

        # f     : function(np.array of AutoDiffXd's) -> np.array of AutoDiffXd's
        # x     : np.array of AutoDiffXd at which to calculate finite_difference
        # idx   : Index of AutoDiffXd in x to perturb
        # delta : magnitude of perturbation of AutoDiffXd at index idx of x
        def finite_difference(f, x, idx, delta=1e-7):
            x_hi = copy.deepcopy(x)
            x_hi[idx] += delta
            x_lo = copy.deepcopy(x)
            x_lo[idx] -= delta
            out_hi = np.array([elem.value() for elem in f(x_hi)])
            out_lo = np.array([elem.value() for elem in f(x_lo)])
            return (out_hi - out_lo) / (2 * delta)

        # The AutoDiff pass yields one AutoDiffXd per output; stacking their
        # derivatives gives a Jacobian of shape (n_output, n_input).
        # Repeatedly finite differencing every input instead yields a vector
        # of derivatives per input, i.e. a Jacobian of shape
        # (n_input, n_output).  Compute both, transpose one, and compare them
        # for equality.

        # The comparison touches total_params**2 entries; keep that bounded.
        assert total_params < 1e3

        # Jacobian assembled from the AutoDiffXd outputs, (n_output x n_input).
        ad_jac = np.array([elem.derivatives() for elem in out_vec])

        # Jacobian assembled from finite differences, (n_input x n_output).
        def fn(x):
            # Wrapper for NNInferenceHelper_autodiff that accepts a single
            # stacked vector, assigns the first n_inputs entries to in_vec and
            # the rest to param_vec, and reloads the network from param_vec.
            in_vec = x[:n_inputs]
            param_vec = x[n_inputs:]
            if param_vec.size:
                nn_loader(param_vec, network)
            return NNInferenceHelper_autodiff(network,
                                              in_vec,
                                              param_vec=param_vec,
                                              debug=False)

        fd_jac = np.array([
            finite_difference(fn, np.hstack((in_vec, param_vec)), inp_idx)
            for inp_idx in range(total_params)
        ])

        # Do our comparison.
        if debug:
            print('fd_jac.T: ', fd_jac.T)
            print('ad_jac:   ', ad_jac)
        np.testing.assert_allclose(fd_jac.T, ad_jac, atol=1e-3)
        print("\t ...GOOD")
Example #12
from pydrake.all import (  # module assumed; the original import opener was truncated
    Simulator,
    VectorSystem,
)

from NNSystem import NNSystem, NNSystem_, NNInferenceHelper_double, NNInferenceHelper_autodiff, nn_loader
from networks import FC, FCBIG, MLPSMALL, MLP

# Test 1
# Try creating a simple NNSystem and show that it supports AutoDiffXd's.
# Define network, input
torch.set_default_tensor_type('torch.DoubleTensor')
net = FC(n_inputs=2, n_outputs=1)
print("net.parameters()= ", list(net.parameters()))
#net = MLP(n_inputs=2, n_outputs=1, h_sz=2)
autodiff_in = np.array([[
    AutoDiffXd(1., [1., 0.]),
], [
    AutoDiffXd(1., [0., 1.]),
]])

# Make system and give input
nn_system = NNSystem_[AutoDiffXd](pytorch_nn_object=net)
context = nn_system.CreateDefaultContext()
context.FixInputPort(0, autodiff_in)

# Allocate and Eval output
output = nn_system.AllocateOutput()
nn_system.CalcOutput(context, output)
autodiff = output.get_vector_data(0).CopyToVector()[0]
print("autodiff: ", autodiff)
print("derivatives: ", autodiff.derivatives())
Example #13
from pydrake.systems.framework import (AbstractValue, BasicVector,
                                       BasicVector_)

from NNSystem import NNSystem, NNSystem_
from NNTestSetup import NNTestSetup
from NNSystemHelper import FC, MLP

# nn_test_setup = NNTestSetup(pytorch_nn_object=MLP())
# nn_test_setup.RunSimulation()

# Define network, input
torch.set_default_tensor_type('torch.DoubleTensor')
net = FC()
net = MLP()
autodiff_in = np.array([[
    AutoDiffXd(1.0, [1.0, 0.0]),
], [
    AutoDiffXd(1.0, [1.0, 0.0]),
]])

# Make system and give input
nn_system = NNSystem_[AutoDiffXd](pytorch_nn_object=net)
context = nn_system.CreateDefaultContext()
context.FixInputPort(0, autodiff_in)

# Allocate and Eval output
output = nn_system.AllocateOutput()
nn_system.CalcOutput(context, output)
autodiff = output.get_vector_data(0).CopyToVector()[0]
print("autodiff: ", autodiff)
print("derivatives: ", autodiff.derivatives())
Example #14
import pydrake
import torch
import numpy as np

import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

torch.manual_seed(1)

lin = nn.Linear(5, 3)  # maps from R^5 to R^3, parameters A, b
data = torch.randn(
    2, 5)  # data is 2x5.  A maps from 5 to 3... can we map "data" under A?
print(F.relu(lin(data)))
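
# The answer to the question above is yes (a quick check, not from the
# original source): nn.Linear(5, 3) maps each of the 2 rows of `data`,
# yielding a 2x3 output.
print(lin(data).shape)  # torch.Size([2, 3])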

import pydrake
import pydrake.autodiffutils
from pydrake.autodiffutils import AutoDiffXd

x = AutoDiffXd(1.5)
arr = np.array([1., 2., 3.])
y = AutoDiffXd(1.5, arr)
for var in (x, y):
    print(var.value(), var.derivatives())
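
# Hypothetical follow-on (not in the original source): arithmetic propagates
# derivatives via the chain rule, e.g. d(y * y) = 2 * y.value() * y.derivatives().
z = y * y
print(z.value())        # 2.25
print(z.derivatives())  # [3., 6., 9.]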
Example #15
    def test_div(self):
        x = AutoDiffXd(1, [1., 0])
        y = x / 2.
        self.assertAlmostEqual(y.value(), .5)
        np.testing.assert_almost_equal(y.derivatives(), [.5, 0.])
Example #16
    def test_pow(self):
        x = AutoDiffXd(1., [1., 0., 0.])
        y = x ** 2
        self.assertAlmostEqual(y.value(), 1.)
        np.testing.assert_almost_equal(y.derivatives(), [2., 0., 0.])
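
A hypothetical companion test (not from the source) exercising the product rule in the same style:

    def test_mul(self):
        x = AutoDiffXd(2., [1., 0.])
        y = AutoDiffXd(3., [0., 1.])
        z = x * y
        self.assertAlmostEqual(z.value(), 6.)
        # Product rule: dz = y * dx + x * dy = [3., 2.]
        np.testing.assert_almost_equal(z.derivatives(), [3., 2.])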