Code Example #1
import numpy as np
import pinocchio
from toppra.cpp import PiecewisePolyPath


def generate_random_trajectory(robot, npts, maxd, randskip=0):
    # Draw and discard `randskip` configurations: there is no obvious way to
    # set the random seed in pinocchio, so we skip ahead in the stream instead.
    for _ in range(randskip):
        pinocchio.randomConfiguration(robot.model)

    ts = [0.0]
    qs = [pinocchio.randomConfiguration(robot.model)]
    while len(qs) < npts:
        qlast = qs[-1]
        q = pinocchio.randomConfiguration(robot.model)
        d = pinocchio.distance(robot.model, qlast, q)
        # Clip each step to a maximum configuration-space distance of maxd.
        if d > maxd:
            q = pinocchio.interpolate(robot.model, qlast, q, maxd / d)
        qs.append(q)
        ts.append(ts[-1] + pinocchio.distance(robot.model, qlast, q))

    # Compute velocities and accelerations by central finite differences on
    # the configuration manifold, with zero boundary conditions.
    vs = [np.zeros(robot.model.nv)]
    accs = [np.zeros(robot.model.nv)]
    eps = 0.01
    for q0, q1, q2 in zip(qs, qs[1:], qs[2:]):
        qprev = pinocchio.interpolate(
            robot.model, q0, q1, 1 - eps / pinocchio.distance(robot.model, q0, q1)
        )
        qnext = pinocchio.interpolate(
            robot.model, q1, q2, eps / pinocchio.distance(robot.model, q1, q2)
        )
        # (qnext - qprev) / eps
        vs.append(pinocchio.difference(robot.model, qprev, qnext) / eps)
        # ((qnext - q1) - (q1 - qprev)) / eps^2
        accs.append(
            (
                pinocchio.difference(robot.model, q1, qnext)
                - pinocchio.difference(robot.model, qprev, q1)
            )
            / (eps ** 2)
        )
    vs.append(np.zeros(robot.model.nv))
    accs.append(np.zeros(robot.model.nv))

    return PiecewisePolyPath.constructHermite(qs, vs, ts)
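A usage sketch for the function above, assuming toppra with its C++ bindings is installed and using pinocchio's sample manipulator (its joint limits are set explicitly here so that randomConfiguration is well defined; the wrapper object only needs a `.model` attribute):

import types

import numpy as np
import pinocchio

model = pinocchio.buildSampleModelManipulator()
model.lowerPositionLimit = -np.pi * np.ones(model.nq)
model.upperPositionLimit = np.pi * np.ones(model.nq)

# generate_random_trajectory only touches robot.model, so a bare
# namespace object stands in for a full RobotWrapper here.
robot = types.SimpleNamespace(model=model)
path = generate_random_trajectory(robot, npts=10, maxd=0.5)
# `path` is a toppra PiecewisePolyPath, ready for time-parametrization.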
Code Example #2
File: __init__.py (Project: iit-DLSLab/crocoddyl)
    def diff(self, x0, x1):
        """Tangent-space difference x1 [-] x0 between two states, where a
        state stacks a configuration q (nq) and a velocity v (nv)."""
        q0 = x0[:self.nq]
        q1 = x1[:self.nq]
        v0 = x0[-self.nv:]
        v1 = x1[-self.nv:]
        dq = pinocchio.difference(self.model, q0, q1)
        return np.concatenate([dq, v1 - v0])
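On a purely Euclidean configuration space (no free-flyer or other Lie-group joints), pinocchio.difference reduces to plain vector subtraction, which is what makes the dq half of this state diff consistent with the v1 - v0 half. A quick check, assuming pinocchio's sample manipulator model:

import numpy as np
import pinocchio

model = pinocchio.buildSampleModelManipulator()
q0 = pinocchio.neutral(model)
q1 = q0 + 0.1  # valid here: nq == nv and the space is a vector space
assert np.allclose(pinocchio.difference(model, q0, q1), q1 - q0)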
Code Example #3
File: gaits_solo12.py (Project: skleff1994/consim)
def state_diff(robot, x1, x2):
    """ returns x2 - x1 """
    # States are column vectors (np.matrix-era pinocchio), stacking
    # q (nq x 1) on top of v (nv x 1).
    xdiff = np.zeros([2 * robot.model.nv, 1])
    xdiff[:robot.model.nv] = pin.difference(robot.model, x1[:robot.model.nq],
                                            x2[:robot.model.nq])
    xdiff[robot.model.nv:] = x2[robot.model.nq:] - x1[robot.model.nq:]
    return xdiff
Code Example #4
File: simu_cpp_common.py (Project: skleff1994/consim)
def compute_integration_errors(data, robot, dt):
    print('\n')
    res = Empty()  # plain attribute container defined elsewhere in the project
    res.ndt, res.dt, res.comp_time, res.realtime_factor = {}, {}, {}, {}
    res.err_2norm_avg, res.err_infnorm_avg, res.err_infnorm_max = {}, {}, {}
    res.err_traj_2norm, res.err_traj_infnorm = {}, {}
    res.mat_mult, res.mat_norm = {}, {}
    for name in sorted(data.keys()):
        if('ground-truth' in name): continue
        d = data[name]
        data_ground_truth = data['ground-truth-'+d.simulator]
        
        err_vec = np.empty((2*robot.nv, d.q.shape[1]))
        err_per_time_2 = np.empty(d.q.shape[1])
        err_per_time_inf = np.empty(d.q.shape[1])
        err_2, err_inf = 0.0, 0.0
        for i in range(d.q.shape[1]):
            err_vec[:robot.nv,i] = pin.difference(robot.model, d.q[:,i], data_ground_truth.q[:,i])
            err_vec[robot.nv:,i] = d.v[:,i] - data_ground_truth.v[:,i]
            err_per_time_2[i]   = norm(err_vec[:,i])
            err_per_time_inf[i] = norm(err_vec[:,i], np.inf)
            err_2   += err_per_time_2[i]
            err_inf += err_per_time_inf[i]
        err_2 /= d.q.shape[1]
        err_inf /= d.q.shape[1]
        err_peak = np.max(err_per_time_inf)
        print(name, 'Log error inf-norm: %.2f'%np.log10(err_inf))
        if(d.method_name not in res.err_2norm_avg):
            for k in res.__dict__.keys():
                res.__dict__[k][d.method_name] = []

        res.err_2norm_avg[d.method_name] += [err_2]
        res.err_infnorm_avg[d.method_name] += [err_inf]
        res.err_infnorm_max[d.method_name] += [err_peak]
        res.err_traj_2norm[name] = err_per_time_2
        res.err_traj_infnorm[name] = err_per_time_inf
        res.ndt[d.method_name] += [d.ndt]
        res.dt[d.method_name] += [dt/d.ndt]
        try:
            comp_time = d.computation_times['substep'].avg * d.ndt
        except Exception:
            comp_time = np.nan
        res.comp_time[d.method_name] += [comp_time]
        res.realtime_factor[d.method_name] += [dt/comp_time]
        if(d.simulator=='exponential'):
            try:
                res.mat_mult[d.method_name] += [np.mean(d.mat_mult)]
                res.mat_norm[d.method_name] += [np.mean(d.mat_norm)]
            except Exception:
                pass
    return res
Code Example #5
    def differentiatePostures(self, ini_state, end_state, dt):
        # Average generalized velocity between the two postures: the
        # tangent-space difference q_end [-] q_ini divided by dt.
        q_ini = np.transpose(
            np.matrix(ini_state.robot_posture.generalized_joint_positions()))
        q_end = np.transpose(
            np.matrix(end_state.robot_posture.generalized_joint_positions()))
        q_dot = pin.difference(self.robot.model, q_ini, q_end) / dt
        end_state.robot_velocity.generalized_joint_velocities = np.squeeze(
            np.asarray(q_dot))

        return end_state
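Dividing pinocchio.difference by a time step, as above, is the standard way to recover a generalized velocity from two configurations. A minimal round-trip sketch, assuming pinocchio's sample manipulator model (all names below are illustrative, not from the snippet):

import numpy as np
import pinocchio

model = pinocchio.buildSampleModelManipulator()
q0 = pinocchio.neutral(model)
v = 0.1 * np.ones(model.nv)
dt = 1e-3

# Integrate v over dt, then differentiate back: difference() is the
# inverse of integrate(), so the recovered velocity matches v.
q1 = pinocchio.integrate(model, q0, v * dt)
v_rec = pinocchio.difference(model, q0, q1) / dt
assert np.allclose(v_rec, v)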
Code Example #6
    def dCostQ_dx(self, x):
        '''
        ddiff_dx2(x1,x2) = dint_dv(x1,x2-x1)^-1
        ddiff_dq(refQ,q) = dint_dv(refQ,q-refQ)
        '''
        q = self.vq2q(a2m(x))
        dq = pinocchio.difference(self.rmodel, self.refQ, q)
        dDiff = inv(pinocchio.dIntegrate(self.rmodel, self.refQ, dq)[1])
        grad = dDiff[6:, :].T * dq[6:]
        return m2a(grad)
Code Example #7
    def diff(self, x0, x1):
        """ Difference between the robot's states x1 and x0.

        :param x0: current state
        :param x1: next state
        :return x1 [-] x0 value
        """
        nq, nv, nx = self.model.nq, self.model.nv, self.nx
        assert (x0.shape == (nx, ) and x1.shape == (nx, ))
        q0 = x0[:nq]
        q1 = x1[:nq]
        v0 = x0[-nv:]
        v1 = x1[-nv:]
        dq = pinocchio.difference(self.model, a2m(q0), a2m(q1))
        return np.concatenate([dq.flat, v1 - v0])
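A usage sketch of a state diff of this shape on a floating-base model, assuming pinocchio's sample humanoid (the a2m/m2a matrix conversions of the snippet are dropped; plain ndarrays are used):

import numpy as np
import pinocchio

model = pinocchio.buildSampleModelHumanoid()
nq, nv = model.nq, model.nv

q0, v0 = pinocchio.neutral(model), np.zeros(nv)
q1 = pinocchio.integrate(model, q0, 0.05 * np.ones(nv))
v1 = 0.1 * np.ones(nv)

x0 = np.concatenate([q0, v0])
x1 = np.concatenate([q1, v1])

dq = pinocchio.difference(model, x0[:nq], x1[:nq])
dx = np.concatenate([dq, x1[-nv:] - x0[-nv:]])
assert dx.shape == (2 * nv,)  # tangent-space dimension, not nq + nv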
Code Example #8
    def test_basic(self):
        model = self.model

        q_ones = np.ones((model.nq))
        self.assertFalse(pin.isNormalized(model, q_ones))
        self.assertFalse(pin.isNormalized(model, q_ones, 1e-8))
        self.assertTrue(pin.isNormalized(model, q_ones, 1e2))

        q_rand = np.random.rand((model.nq))
        q_rand = pin.normalize(model, q_rand)
        self.assertTrue(pin.isNormalized(model, q_rand))
        self.assertTrue(pin.isNormalized(model, q_rand, 1e-8))

        self.assertTrue(abs(np.linalg.norm(q_rand[3:7]) - 1.) <= 1e-8)

        q_next = pin.integrate(model, self.q, np.zeros((model.nv)))
        self.assertApprox(q_next, self.q)

        v_diff = pin.difference(model, self.q, q_next)
        self.assertApprox(v_diff, np.zeros((model.nv)))

        q_next = pin.integrate(model, self.q, self.v)
        q_int = pin.interpolate(model, self.q, q_next, 0.5)

        self.assertApprox(q_int, q_int)

        value = pin.squaredDistance(model, self.q, self.q)
        self.assertTrue((value <= 1e-8).all())

        dist = pin.distance(model, self.q, self.q)
        self.assertTrue(dist <= 1e-8)

        q_neutral = pin.neutral(model)
        self.assertApprox(q_neutral, q_neutral)

        q_rand1 = pin.randomConfiguration(model)
        q_rand2 = pin.randomConfiguration(model, -np.ones((model.nq)),
                                          np.ones((model.nq)))

        self.assertTrue(pin.isSameConfiguration(model, self.q, self.q, 1e-8))

        self.assertFalse(pin.isSameConfiguration(model, q_rand1, q_rand2,
                                                 1e-8))
Code Example #9
def compute_integration_errors_vs_k_b(data, data_gt, robot, dt):
    print('\n')
    res = Empty()
    res.k, res.damping_ratio, res.comp_time, res.realtime_factor = {}, {}, {}, {}
    res.err_infnorm_avg, res.err_infnorm_max = {}, {}
    res.err_traj_infnorm = {}

    for name in sorted(data.keys()):
        if ('ground-truth' in name): continue
        d = data[name]
        data_ground_truth = data_gt[name]

        err_vec = np.empty((2 * robot.nv, d.q.shape[1]))
        err_per_time_inf = np.empty(d.q.shape[1])
        err_inf = 0.0
        for i in range(d.q.shape[1]):
            err_vec[:robot.nv, i] = pin.difference(robot.model, d.q[:, i],
                                                   data_ground_truth.q[:, i])
            err_vec[robot.nv:, i] = d.v[:, i] - data_ground_truth.v[:, i]
            err_per_time_inf[i] = norm(err_vec[:, i], np.inf)
            err_inf += err_per_time_inf[i]
        err_inf /= d.q.shape[1]
        err_peak = np.max(err_per_time_inf)
        if (d.method_name not in res.err_infnorm_avg):
            for k in res.__dict__.keys():
                res.__dict__[k][d.method_name] = []

        res.err_infnorm_avg[d.method_name] += [err_inf]
        res.err_infnorm_max[d.method_name] += [err_peak]
        res.err_traj_infnorm[name] = err_per_time_inf
        res.k[d.method_name] += [d.K]
        res.damping_ratio[d.method_name] += [d.damping_ratio]
        comp_time = d.computation_times['substep'].avg * d.ndt
        res.comp_time[d.method_name] += [comp_time]
        res.realtime_factor[d.method_name] += [dt / comp_time]
        try:
            print(name, 'Log error inf-norm: %.2f' % np.log10(err_inf),
                  'RT factor', int(dt / comp_time))
        except Exception:
            pass
    return res
Code Example #10
    def execute_motion(self, plotting=False, tune_online=False):
        sim = self.sim
        P, D = 10. * np.ones(8), 0.3 * np.ones(8)

        for i in range(8):
            joint_name = self.robot.model.names[int(sim.pinocchio_joint_ids[i])]
            if "HFE" in joint_name or "FR" in joint_name or "FL" in joint_name:
                D[i] = 0.5

        num_uncontrolled_joints = 6

        # Apply gains to reach steady state
        loop = 0
        try:
            while loop < 2000:
                q, dq = sim.get_state()

                ptau = np.diag(P) * se3.difference(self.robot.model, q, self.init_config)[6:]
                ptau += np.diag(D) * -dq[6:]
                self.limit_torques(ptau)

                sim.send_joint_command(ptau)
                sim.step()

                loop += 1

        except KeyboardInterrupt:
            print("Keyboard interrupt")

        desired_pos = desired_state("POSITION", self.time_vector, optimized_sequence=self.optimized_kin_plan)
        desired_vel = desired_state("VELOCITY", self.time_vector, optimized_sequence=self.optimized_kin_plan)
        desired_com_func = desired_state("COM", self.time_vector, optimized_sequence=self.optimized_kin_plan)
        desired_lmom_func = desired_state("LMOM", self.time_vector, optimized_sequence=self.optimized_kin_plan)
        desired_amom_func = desired_state("AMOM", self.time_vector, optimized_sequence=self.optimized_kin_plan)
        if self.dynamics_feedback is not None:
            dyn_feedback = desired_state("DYN_FEEDBACK", self.time_vector, dynamics_feedback=self.dynamics_feedback)

        time_horizon = 4.0
        max_num_iterations = int(time_horizon * 1000)

        desired_pos_arr = np.zeros((max_num_iterations, len(self.controlled_joints)))
        desired_vel_arr = np.zeros((max_num_iterations, len(self.controlled_joints)))
        actual_pos_arr = np.zeros((max_num_iterations, len(self.controlled_joints)))
        actual_vel_arr = np.zeros((max_num_iterations, len(self.controlled_joints)))
        base_states = np.zeros((max_num_iterations, 7))
        tau_arr = np.zeros((max_num_iterations, len(self.controlled_joints)))
        forces_arr = np.zeros((max_num_iterations, len(sim.pinocchio_endeff_ids), 6))

        t_vec = np.zeros((max_num_iterations))

        # t_0 = time()

        robot_weight = self.planner_setting.get(PlannerDoubleParam_RobotWeight)

        jacobians_eff = {}
        for eff in self.robot.effs:
            for joint in self.robot.joints_list:
                joint_identifier = eff + "_" + joint
                jacobians_eff[joint_identifier] = self.robot.get_jacobian(joint_identifier, "TRANSLATION")

        # Apply gains for trajectory tracking
        try:
            print("Executing motion...")
            executing = True
            # t_0 = None
            # t_1 = None

            force_offset = 0

            while executing:
                loop = 0
                time_id = 0

                swing_times = {}
                for eff in self.robot.effs:
                    swing_times[eff] = []

                while loop < max_num_iterations:
                    t = loop / 1e3
                    if t > self.time_vector[time_id]:
                        time_id += 1

                    des_pos = desired_pos(t)
                    des_vel = desired_vel(t)

                    q, dq = sim.get_state()
                    frame_ids, forces = sim.get_force()

                    q_des = q.copy()
                    q_des[7:] = des_pos.reshape((-1, 1))
                    dq_des = dq.copy()
                    dq_des[6:] = des_vel.reshape((-1, 1))

                    ptau = np.diag(P) * se3.difference(self.robot.model, q, q_des)[6:]
                    ptau += np.diag(D) * (dq_des - dq)[6:]

                    planned_force = np.zeros((3 * len(self.robot.effs)))
                    jacobians_effs = np.zeros((3 * len(self.robot.effs), self.robot.robot.nv))
                    for eff_id, eff in enumerate(self.robot.effs):
                        eff = eff + "_ANKLE"
                        force = self.optimized_dyn_plan.dynamics_states[time_id].effForce(eff_id) * robot_weight
                        # force = self.optimized_dyn_plan.dynamics_states[min(time_id - force_offset, len(self.time_vector) - 1)].effForce(eff_id) * robot_weight
                        # force = self.optimized_dyn_plan.dynamics_states[max(time_id - force_offset, 0)].effForce(eff_id) * robot_weight
                        planned_force[eff_id * 3 : eff_id * 3 + 3] = force
                        jacobians_effs[eff_id * 3 : eff_id * 3 + 3, :] = jacobians_eff[eff]()

                    use_dyn_feedback = self.dynamics_feedback is not None

                    if use_dyn_feedback:
                        if loop == 0:
                            delta_t = 0
                        else:
                            # t_vec is preallocated with zeros, so index the
                            # previous sample explicitly; t_vec[-1] would read
                            # an unfilled zero entry.
                            delta_t = t - t_vec[loop - 1]
                        joint_configuration = q[7:].reshape((-1))
                        base_state = np.zeros((7))
                        base_state[:3], base_state[3:] = p.getBasePositionAndOrientation(self.robotId)
                        com, lmom, amom = self.calculate_momentum(delta_t, joint_configuration, base_state)
                        desired_momentum = np.hstack((desired_com_func(t), desired_lmom_func(t), desired_amom_func(t)))
                        current_momentum = np.hstack((com, lmom, amom))
                        delta_momentum = current_momentum - desired_momentum
                        delta_force = np.dot(dyn_feedback(t), delta_momentum)
                    else:
                        delta_force = np.zeros_like(planned_force)

                    ptau += np.reshape(np.transpose(np.dot(np.transpose(jacobians_effs), planned_force + delta_force))[6:], (ptau.shape[0], 1))

                    self.limit_torques(ptau)

                    sim.send_joint_command(ptau)

                    desired_pos_arr[loop, :] = des_pos
                    desired_vel_arr[loop, :] = des_vel
                    actual_pos_arr[loop, :] = q[7:].reshape((-1))
                    actual_vel_arr[loop, :] = dq[6:].reshape((-1))
                    base_state_and_orientation = p.getBasePositionAndOrientation(self.robotId)
                    base_states[loop, :3] = base_state_and_orientation[0]
                    base_states[loop, 3:] = base_state_and_orientation[1]
                    t_vec[loop] = t
                    tau_arr[loop, :] = np.squeeze(np.array(ptau), 1)
                    for cnt_idx in range(len(forces)):
                        endeff_id = np.where(np.array(sim.pinocchio_endeff_ids) == frame_ids[cnt_idx])[0][0]
                        forces_arr[loop, endeff_id, :] = forces[cnt_idx]
                        robot_endeff = self.robot.effs[endeff_id]

                        # Determine swing times
                        if np.sum(np.abs(forces[cnt_idx])) < 1e-3:
                            if len(swing_times[robot_endeff]) == 0:
                                swing_times[robot_endeff] = [[t]]
                            elif len(swing_times[robot_endeff][-1]) == 2:
                                swing_times[robot_endeff].append([t])
                        else:
                            if len(swing_times[robot_endeff]) > 0:
                                if len(swing_times[robot_endeff][-1]) == 1:
                                    swing_times[robot_endeff][-1].append(t)

                    sim.step()
                    # sleep(0.001)

                    loop += 1

                # force_offset += 1
                actual_com, actual_lmom, actual_amom = self.calculate_actual_trajectories(loop, t_vec, actual_pos_arr, base_states)

                desired_com = np.zeros((len(self.time_vector), 3))
                desired_lmom = np.zeros((len(self.time_vector), 3))
                desired_amom = np.zeros((len(self.time_vector), 3))
                for t in range(len(self.time_vector)):
                    desired_com[t, :] = self.optimized_kin_plan.kinematics_states[t].com
                    desired_lmom[t, :] = self.optimized_kin_plan.kinematics_states[t].lmom
                    desired_amom[t, :] = self.optimized_kin_plan.kinematics_states[t].amom

                # Apply delta to desired_com, because of different initial positions
                desired_com += actual_com[0, :] - desired_com[0, :]

                actual_trajectories = {"joint_configs": actual_pos_arr, "joint_velocities": actual_vel_arr,
                                       "COM": actual_com, "LMOM": actual_lmom, "AMOM": actual_amom}
                desired_trajectories = {"joint_configs": desired_pos_arr, "joint_velocities": desired_vel_arr,
                                        "COM": desired_com, "LMOM": desired_lmom, "AMOM": desired_amom}

                if plotting:
                    self.plot_execution(t_vec, loop, desired_trajectories, actual_trajectories, swing_times)
                    self.plot_torques(t_vec, loop, tau_arr, swing_times)
                    self.plot_forces(t_vec, loop, forces_arr, swing_times)

                if tune_online:
                    P, D = self.tunePD(P, D)
                else:
                    executing = False
                    # pass

            print("...Finished execution.")

        except KeyboardInterrupt:
            print("Keyboard interrupt")
Code Example #11
    def costQ(self, x):
        q = self.vq2q(a2m(x))
        self.residuals = m2a(
            pinocchio.difference(self.rmodel, self.refQ, q)[6:])
        return sum(self.residuals**2)
Code Example #12
    def q2vq(self, q):
        return pinocchio.difference(self.rmodel, self.q0, q)
Code Example #13
File: costs.py (Project: gfadini/summer-school)
    def residual(self, q):
        return pin.difference(self.rmodel, q, self.qref)
Code Example #14
    def calc(self, q):
        self.res = self.weights * pin.difference(self.rmodel,
                                                 self.desired_posture, q)
        return self.res
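A hedged sketch of how a weighted posture residual like this is typically consumed in a scalar least-squares cost. The class below is illustrative (only the calc body mirrors the snippet; rmodel, weights, and desired_posture are assumed constructor arguments):

import numpy as np
import pinocchio as pin


class PostureCost:
    def __init__(self, rmodel, desired_posture, weights):
        self.rmodel = rmodel
        self.desired_posture = desired_posture
        self.weights = weights

    def calc(self, q):
        self.res = self.weights * pin.difference(self.rmodel,
                                                 self.desired_posture, q)
        return self.res

    def value(self, q):
        # Scalar cost: half the squared norm of the weighted residual.
        r = self.calc(q)
        return 0.5 * r.dot(r)


model = pin.buildSampleModelManipulator()
cost = PostureCost(model, pin.neutral(model), np.ones(model.nv))
q = pin.integrate(model, pin.neutral(model), 0.3 * np.ones(model.nv))
print(cost.value(q))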
Code Example #15
    def get_difference(self, q_1, q_2):
        return pinocchio.difference(self.model, q_1, q_2)
Code Example #16
    def costQ(self, x):
        q, tauq, fr, fl = self.x2qf(x)
        self.residuals[:] = pinocchio.difference(self.rmodel, self.refQ,
                                                 q)[6:].flat
        return sum(self.residuals**2)