Example 1
    def new_episode(self):
        self.vehicle_collided = None
        self.episode_started = True
        self.timestamp = time.time()
        self.timestamp0 = self.timestamp

        max_tries = 100
        for _ in range(max_tries):
            episode = self.world.new_episode()
            # TODO: change name
            pose = episode.vehicle_state
            SE3.belongs(pose)
            primitives = self.world.get_primitives()
            check_primitives(primitives)
            self.vehicle.set_world_primitives(primitives)
            collision = self.vehicle.colliding_pose(
                pose, safety_margin=self.safety_margin)
            if not collision.collided:
                self.vehicle.set_pose(pose)
                return episode
            # print('Bad random: collision  %s' % str(collision))
        else:
            msg = ('Cannot find a non-colliding state after %d tries.'
                    % max_tries)
            raise Exception(msg)
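The method above is plain rejection sampling: draw a candidate pose, keep it only if it is collision-free, and give up after a bounded number of tries. A minimal standalone sketch of the same pattern, assuming hypothetical sample_pose and is_colliding callables (not part of the original code):

def find_free_pose(sample_pose, is_colliding, max_tries=100):
    # Draw candidate poses until one is collision-free, or give up.
    for _ in range(max_tries):
        pose = sample_pose()
        if not is_colliding(pose):
            return pose
    raise Exception('Cannot find a non-colliding state after %d tries.'
                    % max_tries)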
Example 3
def read_pose_data(bds):
    for bd in bds:
        extra = bd['extra'].item()
        if 'robot_pose' not in extra:
            msg = 'Could not find pose "robot_pose" in extra.'
            raise Exception(msg)
        pose = np.array(extra['robot_pose'])
        SE3.belongs(pose)
        yield bd, pose
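A minimal driving sketch for read_pose_data, assuming only numpy and the geometry package; the record below is fabricated, while real input comes from boot-data logs where 'extra' is a 0-d object array (hence the .item() call):

import numpy as np
from geometry import SE3

# np.array(dict) yields a 0-d object array, mimicking the log format
bd = {'extra': np.array({'robot_pose': np.eye(4).tolist()})}
for record, pose in read_pose_data([bd]):
    SE3.belongs(pose)  # pose is a 4x4 homogeneous matrix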
Example 4
def servo_stats_summary(data_central, id_agent, id_robot, id_episode):  # @UnusedVariable
    from geometry import (SE2, SE2_from_SE3, translation_from_SE2,
                          angle_from_SE2, SE3)

    log_index = data_central.get_log_index()
    errors = []
    timestamps = []
    poses = []
    dist_xy = []
    dist_th = []
    for observations in log_index.read_robot_episode(
            id_robot, id_episode, read_extra=True):
        extra = observations['extra'].item()

        servoing = extra.get('servoing', None)
        if servoing is None:
            logger.error('No "servoing" in episode %r.' % id_episode)
            break

        obs0 = np.array(servoing['obs0'])
        pose0 = SE2_from_SE3(SE3.from_yaml(servoing['pose0']))
        pose1 = SE2_from_SE3(SE3.from_yaml(servoing['pose1']))
        poseK = SE2_from_SE3(SE3.from_yaml(servoing['poseK']))

        pose1 = SE2.multiply(SE2.inverse(pose0), pose1)
        poseK = SE2.multiply(SE2.inverse(pose0), poseK)
        poses.append(poseK)

        dist_xy.append(np.linalg.norm(translation_from_SE2(poseK)))
        dist_th.append(np.abs(angle_from_SE2(poseK)))

        # obs1 = np.array(servoing['obs1'])
        obsK = np.array(servoing['obsK'])

        err_L2 = np.linalg.norm(obs0 - obsK)

        errors.append(err_L2)
        timestamps.append(observations['timestamp'])
        # last['time_from_episode_start'] = observations['time_from_episode_start']

    initial_distance = np.linalg.norm(translation_from_SE2(pose1))

    summary = {}
    summary['pose0'] = pose0
    summary['pose1'] = pose1
    summary['poses'] = poses
    summary['errors'] = errors
    summary['timestamps'] = timestamps
    summary['initial_translation'] = translation_from_SE2(pose1)
    summary['initial_distance'] = initial_distance
    summary['initial_rotation'] = angle_from_SE2(pose1)
    summary['dist_xy'] = dist_xy
    summary['dist_th'] = dist_th
    return summary
Example 5
def to_yaml(self):
    if self.current_observations is None:
        raise Exception('Observations not computed')
    return {
        'sensor': self.sensor.to_yaml(),
        'pose': SE3.to_yaml(self.pose),
        'joint': self.joint,
        'extra': self.extra,
        'current_observations': dict_to_yaml(self.current_observations),
        'current_pose': SE3.to_yaml(self.current_pose),
    }
Example 7
def update_(self, msg):
    child_frame_id = msg.child_frame_id
    if child_frame_id == '/base_footprint':
        new_pose = pose_from_ROS_transform(msg.transform)
        if False:  # debug: print the delta between consecutive poses
            if self.pose is not None:
                delta = SE3.multiply(SE3.inverse(self.pose), new_pose)
                x, y = translation_from_SE2(SE2_from_SE3(delta))
                interval = (msg.header.stamp - self.stamp).to_sec()
                print('base_delta: %.3f s  x %.4f  y %.4f' % (interval, x, y))
        self.pose = new_pose
        self.stamp = msg.header.stamp
Example 8
def test_collisions():
    L = 10
    world = Box(L, L)
    #    id_vehicle = 'd_SE2_rb_v-rf360'
    id_vehicle = 'd_SE2_rb_v-random_5'

    get_vehicles_config().load('default')
    vehicles = get_conftools_vehicles()
    # vehicles.load('default')
    vehicle = vehicles.instance(id_vehicle)  # @UndefinedVariable
    vehicle.set_world_primitives(world.get_primitives())
    vehicle.set_pose(SE3.identity())

    commands = np.array([1, 0, 0])

    # go straight
    simulation_length = 10
    dt = 0.1
    time = 0
    steps = int(np.ceil(simulation_length / dt))
    for _ in range(steps):
        vehicle.simulate(commands, dt)
        pose = SE2_from_SE3(vehicle.get_pose())
        translation = translation_from_SE2(pose)
        # print('t=%.3f %s' % (time, SE2.friendly(pose)))
        time += dt
        assert translation[0] <= 9.5
    assert translation[0] >= 9.4
    assert translation[1] == 0
Example 9
def jac_function(theta):
    body_to_c = SE3.group_from_algebra(
        se3.algebra_from_vector(theta[:6]))

    # tag_in_board_offset = theta[6:]
    t_in_board = tag_in_board.copy()
    # t_in_board[:3,3] += tag_in_board_offset
    # t_in_board = np.dot(t_in_board, SE3.group_from_algebra(
    # se3.algebra_from_vector(tag_in_board_offset)))
    error = 0
    img_count = 0
    jacs = []
    for measurement, body_to_world, board_to_world, tag_in_cam in data_tuples:
        tag_pts = np.concatenate(
            (objp, np.ones((objp.shape[0], 1))), axis=1).transpose()
        tag_pts_in_world = np.dot(
            board_to_world, np.dot(t_in_board, tag_pts))
        tag_pts_in_body = np.dot(np.linalg.inv(
            body_to_world), tag_pts_in_world)
        tag_pts_in_cam = np.dot(body_to_c, tag_pts_in_body)

        projections, jac = cv2.projectPoints(
            tag_pts_in_body.T[:, :3], theta[:3], theta[3:6], K, np.zeros((1, 4)))

        jacs.append(jac[:, :6])
    return np.vstack(jacs)
Example 11
def cost_function_tuple_offset(params):
    cam_to_body = SE3.group_from_algebra(se3.algebra_from_vector(params[:6]))
    tuple_offset = int(params[6]*100)
    # Use a central region of data tuples +- 100 
    # The offset determines the start of the measurement offset
    # pdb.set_trace()
    # measurements_offset = data_tuples[buffer_size + tuple_offset: -buffer_size + tuple_offset, 0]
    # bodys_to_world_tuple_offset = data_tuples[buffer_size:-buffer_size, 1]
    # boards_to_world_tuple_offset = data_tuples[buffer_size:-buffer_size, 2]
    # offset_tuples = np.concatenate(measurements_offset, bodys_to_world_offset, boards_to_world_offset, axis=1)

    error = 0
    for i in range(len(data_tuples) - buffer_size*2):
        measurement = data_tuples[i + buffer_size + tuple_offset][0]
        body_to_world = data_tuples[i + buffer_size][1]
        board_to_world = data_tuples[i + buffer_size][2]

        cam_to_world = np.dot(body_to_world, cam_to_body)
        tag_pts = np.array([[-s, -s, 0, 1], [s, -s, 0, 1],
                                [s, s, 0, 1], [-s, s, 0, 1]]).transpose()
        tag_in_board = np.array(
                [[0, -1, 0, s], [1, 0, 0, s], [0, 0, 1, 0], [0, 0, 0, 1]])
        tag_pts_in_world = np.dot(
            board_to_world, np.dot(tag_in_board, tag_pts))
        tag_pts_in_cam = np.dot(np.linalg.inv(cam_to_world), tag_pts_in_world)

        projections = np.dot(K, tag_pts_in_cam[:3, :])
        projections /= projections[2]
        projections = projections[:2].transpose()

        error += np.linalg.norm(measurement - projections)
    return error
Example 12
    def Interpolate(a, b, time):
        ratio = float(time - a.id) / (b.id - a.id)
        M0, M1 = a.M, b.M
        dm = SE3.algebra_from_group(M1.dot(invT(M0)))
        dM = SE3.group_from_algebra(ratio * dm)
        Mt = dM.dot(M0)
        cov_time = (1 - ratio) * a.cov + ratio * b.cov

        return AngleAxisPose.FromM(Mt, id=time, cov=cov_time)

def test_Interpolate():
    p1 = AngleAxisPose(np.zeros(3), np.zeros(3), 0,
                       block_diag(np.eye(3), np.eye(3)))
    p2 = AngleAxisPose(np.zeros(3), np.ones(3), 1,
                       block_diag(np.eye(3), np.eye(3)))
    AngleAxisPose.Interpolate(p1, p2, 0)
    AngleAxisPose.Interpolate(p1, p2, 0.5)
    AngleAxisPose.Interpolate(p1, p2, 1)
Example 13
def show_sensor_data(pylab, vehicle, robot_pose=None, col='r'):
    if robot_pose is None:
        robot_pose = SE3.from_yaml(vehicle['pose'])
    for attached in vehicle['sensors']:
        sensor_pose = from_yaml(attached['current_pose'])
        sensor_t, sensor_theta = \
            translation_angle_from_SE2(SE2_from_SE3(sensor_pose))
        print('robot: %s' % SE2.friendly(SE2_from_SE3(robot_pose)))
        print(' sens: %s' % SE2.friendly(SE2_from_SE3(sensor_pose)))
#        sensor_theta = -sensor_theta
        sensor = attached['sensor']
        if sensor['type'] == 'Rangefinder':
            directions = np.array(sensor['directions'])
            observations = attached['current_observations']
            readings = np.array(observations['readings'])
            valid = np.array(observations['valid'])
#            directions = directions[valid]
#            readings = readings[valid]
            x = []
            y = []
            rho_min = 0.05
            for theta_i, rho_i in zip(directions, readings):
                print('theta_i: %s' % theta_i)
                x.append(sensor_t[0] + 
                         np.cos(sensor_theta + theta_i) * rho_min)
                y.append(sensor_t[1] + 
                         np.sin(sensor_theta + theta_i) * rho_min)
                x.append(sensor_t[0] + 
                         np.cos(sensor_theta + theta_i) * rho_i)
                y.append(sensor_t[1] + 
                         np.sin(sensor_theta + theta_i) * rho_i)
                x.append(None)
                y.append(None)
            pylab.plot(x, y, color=col, markersize=0.5, zorder=2000)
        elif sensor['type'] == 'Photoreceptors':
            directions = np.array(sensor['directions'])
            observations = attached['current_observations']
            readings = np.array(observations['readings'])
            luminance = np.array(observations['luminance'])
            valid = np.array(observations['valid'])
            readings[np.logical_not(valid)] = 0.6
            rho_min = 0.5
            for theta_i, rho_i, lum in zip(directions, readings, luminance):
                x = []
                y = []
                x.append(sensor_t[0] + 
                         np.cos(sensor_theta + theta_i) * rho_min)
                y.append(sensor_t[1] + 
                         np.sin(sensor_theta + theta_i) * rho_min)
                x.append(sensor_t[0] + 
                         np.cos(sensor_theta + theta_i) * rho_i)
                y.append(sensor_t[1] + 
                         np.sin(sensor_theta + theta_i) * rho_i)
                pylab.plot(x, y, color=(lum, lum, lum),
                           markersize=0.5, zorder=2000)
        else:
            print('Unknown sensor type %r' % sensor['type'])
Example 14
def cairo_plot_sensor_data(cr, vehicle_state, scale=1.0, compact=True):
    for attached in vehicle_state['sensors']:
        sensor = attached['sensor']
        observations = attached['current_observations']
        sensor_pose = SE2_from_SE3(SE3.from_yaml(attached['current_pose']))
        sensor_type = sensor['type']

        if sensor_type == VehiclesConstants.SENSOR_TYPE_RANGEFINDER:
            if compact:
                with cairo_rototranslate(cr, sensor_pose):
                    cr.scale(scale, scale)
                    plot_ranges_compact(cr=cr,
                        directions=np.array(sensor['directions']),
                        valid=np.array(observations['valid']),
                        readings=np.array(observations['readings'])
                        )
            else:
                plot_ranges(cr=cr,
                        pose=sensor_pose,
                        directions=np.array(sensor['directions']),
                        valid=np.array(observations['valid']),
                        readings=np.array(observations['readings'])
                        )
        elif sensor_type == VehiclesConstants.SENSOR_TYPE_PHOTORECEPTORS:
            if compact:
                with cairo_rototranslate(cr, sensor_pose):
                    cr.scale(scale, scale)
                    plot_photoreceptors_compact(cr=cr,
                        directions=np.array(sensor['directions']),
                        valid=np.array(observations['valid']),
                        luminance=np.array(observations['luminance']))
            else:
                plot_photoreceptors(cr=cr,
                        pose=sensor_pose,
                        directions=np.array(sensor['directions']),
                        valid=np.array(observations['valid']),
                        readings=np.array(observations['readings']),
                        luminance=np.array(observations['luminance']))
        elif sensor_type == VehiclesConstants.SENSOR_TYPE_FIELDSAMPLER:

            if True:
                with cairo_rototranslate(cr, sensor_pose):
                    plot_fieldsampler_fancy(cr=cr,
                        positions=np.array(sensor['positions']),
                        sensels=np.array(observations['sensels']))
            else:
                plot_fieldsampler(cr=cr,
                        pose=sensor_pose,
                        positions=np.array(sensor['positions']),
                        sensels=np.array(observations['sensels']))
        elif sensor_type == 'RandomSensor':
            # XXX: how can we visualize this?
            pass
        else:
            logger.warning('Unknown sensor type %r.' % sensor_type)
Example 15
def plot_servonave(cr, servonav):
    locations = servonav['locations']
#    current_goal = servonav['current_goal']
    for loc in locations:
        pose = SE2_from_SE3(SE3.from_yaml(loc['pose']))
        with cairo_rototranslate(cr, pose):
#            if current_goal == i:
#                cairo_ref_frame(cr, l=0.5)
#            else:
            grey = [.6, .6, .6]
            cairo_ref_frame(cr, l=0.5, x_color=grey, y_color=grey)
Example 16
def compute_observations(self):
    # TODO: add dynamics observations
    sensel_values = []
    for attached in self.sensors:
        pose = self.dynamics.joint_state(self._get_state(), attached.joint)
        j_pose, _ = pose
        attached.current_pose = SE3.multiply(j_pose, attached.pose)
        attached.current_observations = \
            attached.sensor.compute_observations(attached.current_pose)
        sensels = attached.current_observations['sensels']
        sensel_values.extend(sensels.tolist())
    return np.array(sensel_values, dtype='float32')
Example 18
def sparse_sequence(data, min_th_dist, min_dist):
    """ Yields the subsequence of (data, pose) pairs whose pose has moved
        at least min_dist in translation or min_th_dist in rotation
        since the last yielded pose. """
    # sp = Sparsifier(manifold=R2, min_dist=min_dist)
    last_pose = None
    for datum, pose in data:
        if last_pose is None:
            ok = True
        else:
            distances = SE3.distances(pose, last_pose)
            # print(distances)
            ok = (distances[0] > min_dist) or (distances[1] > min_th_dist)
        if ok:
            last_pose = pose
            yield datum, pose
Example 19
def cost_function(theta):
    body_to_c = SE3.group_from_algebra(
        se3.algebra_from_vector(theta[:6]))

    # tag_in_board_offset = theta[6:]
    t_in_board = tag_in_board.copy()
    # t_in_board[:3,3] += tag_in_board_offset
    # t_in_board = np.dot(t_in_board, SE3.group_from_algebra(
    # se3.algebra_from_vector(tag_in_board_offset)))
    error = 0
    img_count = 0
    residual_vectors = []
    for measurement, body_to_world, board_to_world, tag_in_cam in data_tuples:
        tag_pts = np.concatenate(
            (objp, np.ones((objp.shape[0], 1))), axis=1).transpose()
        tag_pts_in_world = np.dot(
            board_to_world, np.dot(t_in_board, tag_pts))
        tag_pts_in_body = np.dot(np.linalg.inv(
            body_to_world), tag_pts_in_world)
        tag_pts_in_cam = np.dot(body_to_c, tag_pts_in_body)

        projections, jac = cv2.projectPoints(
            tag_pts_in_body.T[:, :3], theta[:3], theta[3:6], K, np.zeros((1, 4)))
        projections = projections.astype(np.float32)

        # cv2.drawChessboardCorners(debug_img, (rows, cols), projections, True)

        projections.shape = (projections.shape[0], projections.shape[-1])
        measurement.shape = (measurement.shape[0], measurement.shape[-1])

        if img_count == 0:
            debug_img = img_tuples[img_count].copy()
            for i in range(projections.shape[0]):
                # cv2 drawing functions need integer pixel coordinates
                proj_pt = (int(projections[i, 0]), int(projections[i, 1]))
                meas_pt = (int(measurement[i, 0]), int(measurement[i, 1]))
                cv2.circle(debug_img, proj_pt, 2, (255, 0, 0), 1)
                cv2.circle(debug_img, meas_pt, 2, (0, 0, 255), 1)
                cv2.line(debug_img, meas_pt, proj_pt, (0, 255, 0))
            cv2.imshow('cost function img', debug_img)
            cv2.waitKey(10)
            # pdb.set_trace()

        img_count += 1
        residual_vectors.append((measurement - projections).ravel())
        # np.linalg.norm(measurement - projections)
        # error += np.sum((measurement - projections), axis=0)
    return np.array(residual_vectors).ravel()
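Because this cost_function returns a flat residual vector rather than a scalar, it is shaped for least-squares solvers. A hedged sketch of how such a residual function is typically minimized; theta0 is a made-up starting point, and actually running it requires the data_tuples, img_tuples, K, objp and tag_in_board globals set up elsewhere:

import numpy as np
from scipy.optimize import least_squares

theta0 = np.zeros(6)  # hypothetical initial guess: identity transform
# result = least_squares(cost_function, theta0)
# body_to_cam = SE3.group_from_algebra(se3.algebra_from_vector(result.x))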
Example 20
def joint_state(self, state, joint=0):
    base_state = self.base_state_from_big_state(state)
    top_state = self.top_state_from_big_state(state)
    conf = self.base.joint_state(base_state, 0)
    body_pose, body_vel = conf
    if joint == 0:
        # print('here: %s' % describe_value(conf))
        return body_pose, body_vel
    elif joint == 1:
        # FIXME: velocity not computed
        q, _ = self.top.joint_state(top_state)
        j = SE3.multiply(body_pose, q)
        jvel = se3.zero()  # @UndefinedVariable
        conf2 = j, jvel
        # print('there: %s' % describe_value(conf2))
        return conf2
    else:
        raise ValueError('No such joint %d.' % joint)
Example 22
    def joint_state(self, state, joint=0):
        car_state = self.car_state_from_big_state(state)
        steering = self.steering_from_big_state(state)

        car_position = self.car.joint_state(car_state, joint=0)

        if joint == 0:
            return car_position
        elif joint == 1:
            p = [self.car.axis_dist, self.car.L]
        elif joint == 2:
            p = [self.car.axis_dist, -self.car.L]
        else:
            raise ValueError('Invalid joint ID %r' % joint)

        pose, vel = car_position
        rel_pose = SE3_from_SE2(SE2_from_translation_angle(p, steering))
        wpose = SE3.multiply(pose, rel_pose)
        return wpose, vel  # XXX: vel
Example 24
def cost_function(cam_to_body_log):
    cam_to_body = SE3.group_from_algebra(se3.algebra_from_vector(cam_to_body_log))
    error = 0
    for measurement, body_to_world, board_to_world in data_tuples:
        cam_to_world = np.dot(body_to_world, cam_to_body)
        tag_pts = np.array([[-s, -s, 0, 1], [s, -s, 0, 1],
                                [s, s, 0, 1], [-s, s, 0, 1]]).transpose()
        tag_in_board = np.array(
                [[0, -1, 0, s], [1, 0, 0, s], [0, 0, 1, 0], [0, 0, 0, 1]])
        tag_pts_in_world = np.dot(
            board_to_world, np.dot(tag_in_board, tag_pts))
        tag_pts_in_cam = np.dot(np.linalg.inv(cam_to_world), tag_pts_in_world)

        projections = np.dot(K, tag_pts_in_cam[:3, :])
        projections /= projections[2]
        projections = projections[:2].transpose()

        error += np.linalg.norm(measurement - projections)
    return error
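The projection step above is the standard pinhole model: multiply camera-frame points by the intrinsics K and normalize by depth. A self-contained sketch with made-up values for K and the points:

import numpy as np

K = np.array([[500.0, 0.0, 320.0],
              [0.0, 500.0, 240.0],
              [0.0, 0.0, 1.0]])  # illustrative intrinsics
pts_cam = np.array([[0.1, -0.1, 0.0],
                    [0.0, 0.1, 0.05],
                    [2.0, 2.0, 2.0]])  # 3xN camera-frame points, depth in last row
proj = np.dot(K, pts_cam)
proj /= proj[2]         # perspective division by depth
pixels = proj[:2].T     # Nx2 pixel coordinates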
Example 25
def cairo_plot_sensor_skins(cr, vehicle_state, scale=1.0):
    for attached in vehicle_state['sensors']:
        sensor = attached['sensor']
        sensor_pose = SE2_from_SE3(SE3.from_yaml(attached['current_pose']))

        extra = attached.get('extra', {})
        sensor_skin = extra.get('skin', None)

        if sensor_skin is None:
            sensor_skin = sensor['type']

        skins_library = get_conftools_skins()
        if sensor_skin not in skins_library:
            logger.warning('Could not find skin %r' % sensor_skin)
        else:
            skin = skins_library.instance(sensor_skin)

            with cairo_rototranslate(cr, sensor_pose):
                cr.scale(scale, scale)
                skin.draw(cr)
Example 26
    def to_yaml(self):
        # pose, velocity
        configuration = self.get_configuration()
        pose = configuration[0]

        joints = []
        for i in range(self.dynamics.num_joints()):
            jc = self.dynamics.joint_state(self._get_state(), i)
            joints.append(to_yaml("TSE3", jc))

        data = {
            'radius': self.radius,
            'id_sensors': self.id_sensors,
            'id_dynamics': self.id_dynamics,
            'pose': SE3.to_yaml(pose),
            'conf': to_yaml('TSE3', configuration),
            'state': self.dynamics.state_to_yaml(self._get_state()),
            'joints': joints,
            'sensors': [s.to_yaml() for s in self.sensors],
            'extra': self.extra
        }
        check_yaml_friendly(data)
        return data
Example 28
        np.set_printoptions(precision=3, suppress=True)
        axes_to_plot = range(6)
        for i in [cam_id]:  # range(num_images):
            pose_cpp = poses[i]
            depth_img_cpp = depth_imgs[i]

            sampled_poses = []
            ray_likelihoods = [[] for _ in range(6)]  # six independent lists, not aliases
            for j in axes_to_plot:
                print('!!Evaluating for axis ' + labels[j])
                likes = []
                for k in range(num_poses):
                    print "Axis::{0} Pose::{1}".format(j, k)
                    delta_vector = np.zeros(6)
                    delta_vector[j] = deltas[k]
                    T = SE3.group_from_algebra(
                        se3.algebra_from_vector(delta_vector))
                    pose = np.dot(pose_cpp, T)
                    sampled_poses.append(pose)
                    # print 'pose is '+str(pose)
                    mrfmap.inference.set_pose(i, pose.astype(dtype))
                    # pdb.set_trace()
                    like_sum = mrfmap.inference.compute_likelihood(i)  # avoid shadowing builtin sum
                    likes.append(like_sum)
                ray_likelihoods[j] = np.array(likes)

            # viewer.visualize_poses(sampled_poses, resolution, scaled_dims)
            # viewer.show()

            f, ax = plt.subplots(6, 1)

            handles = []
Example 29
def servoing_episode(id_robot, robot,
                     id_servo_agent, servo_agent,
                     writer, id_episode,
                     displacement,
                     max_episode_len,
                     save_robot_state,
                     converged_dist_t_m=0.1,
                     converged_dist_th_deg=1,
                     max_tries=10000):
    '''
        :param displacement: Time in seconds to displace the robot.
    '''
    from geometry import SE3, SE2_from_SE3, translation_from_SE2, angle_from_SE2

    def mean_observations(n=10):
        obss = []
        for _ in range(n):  # XXX: fixed threshold
            obss.append(robot.get_observations().observations)
        return np.mean(obss, axis=0)

    def robot_pose():
        return robot.get_observations().robot_pose

    def timestamp():
        return robot.get_observations().timestamp

    def episode_ended():
        return robot.get_observations().episode_end

    def simulate_hold(cmd0, displacement):
        t0 = timestamp()
        nsteps = 0
        while timestamp() < t0 + displacement:
            nsteps += 1
            source = BootOlympicsConstants.CMD_SOURCE_SERVO_DISPLACEMENT
            robot.set_commands(cmd0, source)
            if episode_ended():
                logger.debug('Collision after %d steps' % nsteps)
                return False

        logger.debug('%d steps of simulation to displace by %s' %
                     (nsteps, displacement))
        return True

    for ntries in range(max_tries):
        # iterate until we can do this correctly
        episode = robot.new_episode()
        obs0 = mean_observations()
        cmd0 = robot.get_spec().get_commands().get_random_value()
        pose0 = robot_pose()
        ok = simulate_hold(cmd0, displacement)
        if ok:
            pose1 = robot_pose()
            logger.info('Displacement after %s tries.' % ntries)
            break
    else:
        msg = 'Could not do the displacement (%d tries).' % max_tries
        raise Exception(msg)

    servo_agent.set_goal_observations(obs0)

    for robot_observations, boot_observations in \
        run_simulation_servo(id_robot, robot,
                           id_servo_agent, servo_agent,
                           100000, max_episode_len,
                           id_episode=id_episode,
                           id_environment=episode.id_environment):

        def pose_to_yaml(x):
            ''' Converts to yaml, or sets None. '''
            if x is None:
                return None
            else:
                return SE3.to_yaml(x)

        extra = {}

        sensels_list = boot_observations['observations'].tolist()
        extra['servoing_base'] = dict(goal=obs0.tolist(), current=sensels_list)

        current_pose = robot_observations.robot_pose
        has_pose = current_pose is not None

        if has_pose:
            # Add extra pose information

            extra['servoing_poses'] = dict(goal=pose_to_yaml(pose0),
                                           current=pose_to_yaml(current_pose))

            delta = SE2_from_SE3(SE3.multiply(SE3.inverse(current_pose),
                                              pose0))
            dist_t_m = np.linalg.norm(translation_from_SE2(delta))
            # angle_from_SE2 returns radians; convert before comparing to the degree threshold
            dist_th_deg = np.rad2deg(np.abs(angle_from_SE2(delta)))

            # TODO: make it not overlapping
            extra['servoing'] = dict(obs0=obs0.tolist(),
                                    pose0=pose_to_yaml(pose0),
                                    poseK=pose_to_yaml(current_pose),
                                    obsK=sensels_list,
                                    displacement=displacement,
                                    cmd0=cmd0.tolist(),
                                    pose1=pose_to_yaml(pose1))

        if save_robot_state:
            extra['robot_state'] = robot.get_state()

        writer.push_observations(observations=boot_observations,
                                 extra=extra)

        if has_pose:
            if ((dist_t_m <= converged_dist_t_m) and
                (dist_th_deg <= converged_dist_th_deg)):
                print('Converged!')
                break
        else:
            # TODO: write convergence criterion
            # without pose information
            pass
Example 30
    def get_observations(self, messages, queue):
        """
            messages: topic -> last ROS Message
            returns: an instance of RobotObservations, or None if the update is not ready.
                
        """
        self.debug_nseen += 1
        
        if self.use_tf:
            for topic, msg, _ in queue:
                if topic == '/tf':
                    # feed the tf reader; the pose is read back later
                    # via self.tfreader.get_pose()
                    self.tfreader.update(msg)

        if self.is_ready(messages, queue):    
            observations = self.obs_adapter.observations_from_messages(messages)
            cmds = self.cmd_adapter.commands_from_messages(messages)
            if cmds is None:
                # we could not interpret a meaningful command from the messages
                # logger.info('Skipping because not interpretable by my cmd_adapter.')
                self.debug_nskipped += 1
                if self.debug_nskipped % 100 == 0:
                    perc = 100.0 * self.debug_nskipped / self.debug_nseen
                    msg = ('Skipped %6d/%6d  = %.1f%% because cmd_adapter could not interpret.' % 
                           (self.debug_nskipped, self.debug_nseen, perc))
                    logger.info(msg)
                return None
            
            commands_source, commands = cmds
            episode_end = False
            _, _, last_t = queue[-1]
            timestamp = last_t.to_sec()
            
            robot_pose = None
            
            if self.use_odom_topic:
                odometry = messages[self.odom_topic]
                ros_pose = odometry.pose.pose
                x = ros_pose.position.x
                y = ros_pose.position.y
                theta = ros_pose.orientation.z
                from geometry import SE3_from_SE2, SE2_from_translation_angle
                robot_pose = SE3_from_SE2(SE2_from_translation_angle([x, y], theta))
                # print('odom: %s' % SE2.friendly(SE2_from_SE3(robot_pose)))

                if self.prev_odom is not None:
                    delta = SE3.multiply(SE3.inverse(self.prev_odom), robot_pose)
                    # print('odom delta: %s' % SE2.friendly(SE2_from_SE3(delta)))
       
                self.prev_odom = robot_pose
            
            if self.use_tf:
                robot_pose = self.tfreader.get_pose()
            
            return RobotObservations(timestamp=timestamp,
                                     observations=observations,
                                     commands=commands, commands_source=commands_source,
                                     episode_end=episode_end,
                                     robot_pose=robot_pose)
        else:
            return None
Example 31
def plot_servoing_poses(cr, servoing_poses):
    # TODO
    goal = SE3.from_yaml(servoing_poses['goal'])
    with cairo_rototranslate(cr, goal):
        cairo_ref_frame(cr, l=0.5)
Example 32
def step_too_big(pose1, pose2, max_theta_diff_deg):
    diff = SE3.multiply(SE3.inverse(pose1), pose2)
    diff_theta = np.abs(angle_from_SE2(SE2_from_SE3(diff)))
    return diff_theta > np.deg2rad(max_theta_diff_deg)
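A usage sketch for step_too_big, assuming the geometry package's SE3/se3 as in the other examples; the twist vector below is arbitrary and its component ordering is not asserted here:

import numpy as np
from geometry import SE3, se3

pose1 = SE3.identity()
pose2 = SE3.group_from_algebra(se3.algebra_from_vector(
    np.array([0.1, 0.0, 0.0, 0.0, 0.0, 0.3])))  # small arbitrary twist
print(step_too_big(pose1, pose2, max_theta_diff_deg=5.0))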
Example 33
def regularly_spaced(data, min_dist):
    sp = Sparsifier(manifold=R2, min_dist=min_dist)
    for data, pose in data:
        p = SE3.project_to(R2, pose)        
        if sp.accept(p):
            yield data, pose
Example 34
def pose_diff(a, b):
    return SE3.multiply(SE3.inverse(a), b)
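pose_diff(a, b) is the pose of b expressed in a's frame, so composing a with it recovers b. A quick sanity-check sketch (assumes the geometry package as above; both twists are arbitrary):

import numpy as np
from geometry import SE3, se3

a = SE3.group_from_algebra(se3.algebra_from_vector(
    np.array([0.3, 0.0, 0.0, 0.0, 0.0, 0.2])))
b = SE3.group_from_algebra(se3.algebra_from_vector(
    np.array([0.0, 0.2, 0.0, 0.0, 0.0, 0.1])))
assert np.allclose(SE3.multiply(a, pose_diff(a, b)), b)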
Example 35
def vehicles_cairo_display_all(cr, width, height,
                            sim_state,
                            grid=1,
                            zoom=0,
                            bgcolor=[1, 1, 1],
                            zoom_scale_radius=False,
                            extra_draw_world=None,
                            first_person=True,
                            show_world=True,
                            # show_features='relevant',
                            show_sensor_data=True,
                            show_sensor_data_compact=False,
                            show_robot_body=True,
                            show_robot_sensors=True,
                            show_textures=True,
                            plot_sources=False):
    '''
        :param zoom_scale_radius: If true, scales the zoom by the robot radius.
    '''
    with cairo_save(cr):
        # Paint white
        if bgcolor is not None:
            with cairo_save(cr):
                cairo_set_color(cr, bgcolor)
                cr.rectangle(0, 0, width, height)
                cr.fill()

        vehicle_state = sim_state['vehicle']
        robot_pose = SE2_from_SE3(SE3.from_yaml(vehicle_state['pose']))

        robot_radius = vehicle_state['radius']
        world_state = sim_state['world']
        bounds = world_state['bounds']
        bx = bounds[0]
        by = bounds[1]

        if zoom_scale_radius and zoom != 0:
            zoom = zoom * robot_radius

        world_bounds_pad = 0  # CairoConstants.texture_width
        vehicles_cairo_set_coordinates(cr, width, height,
                                       bounds, robot_pose,
                                       zoom=zoom,
                                       world_bounds_pad=world_bounds_pad,
                                       first_person=first_person)

        if False:
            cr.set_source_rgb(0, 1, 0)
            cr.set_line_width(0.05)
            cr.rectangle(bx[0], by[0], bx[1] - bx[0], by[1] - by[0])
            cr.stroke()

        if grid > 0:
            show_grid(cr, bx, by, spacing=grid, margin=1)

        if extra_draw_world is not None:
            extra_draw_world(cr)

        sensor_types = get_sensor_types(vehicle_state)

        has_cameras = (VehiclesConstants.SENSOR_TYPE_PHOTORECEPTORS
                             in sensor_types)
        has_field_sampler = (VehiclesConstants.SENSOR_TYPE_FIELDSAMPLER
                             in sensor_types)

        if show_world:
            if has_field_sampler:
                cairo_plot_sources(cr, world_state)

            plot_textures = has_cameras and show_textures
            cairo_show_world_geometry(cr, world_state,
                                      plot_textures=plot_textures,
                                      plot_sources=plot_sources,
                                      extra_pad=world_bounds_pad)

        # XXX: tmp
        if extra_draw_world is not None:
            extra_draw_world(cr)

        if show_robot_body:
            joints = get_joints_as_TSE3(vehicle_state)
            extra = vehicle_state.get('extra', {})
            id_skin = extra.get('skin', 'default_skin')
            
            skin = get_conftools_skins().instance(id_skin) 

            with cairo_rototranslate(cr, robot_pose):
                cr.scale(robot_radius, robot_radius)
                skin.draw(cr, joints=joints,
                          timestamp=sim_state['timestamp'])

        if show_robot_sensors:
            # don't like it cause it uses global "current_pose"
            cairo_plot_sensor_skins(cr, vehicle_state,
                                   scale=robot_radius)

        if show_sensor_data:
            cairo_plot_sensor_data(cr, vehicle_state,
                                   scale=robot_radius,
                                   compact=show_sensor_data_compact)
Example 36
def got_tuple(img_msg, cam_odom, board_odom):
    img = bridge.imgmsg_to_cv2(img_msg, "bgr8")
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # image is bgr8, so convert from BGR

    body_to_world = transform_matrix_from_odom(cam_odom)
    board_to_world = transform_matrix_from_odom(board_odom)

    # Get detection from tracker
    pixels = []
    debug_img = bridge.imgmsg_to_cv2(img_msg, "bgr8")

    gray = cv2.cvtColor(debug_img, cv2.COLOR_BGR2GRAY)

    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    axis = np.float32([[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5]]).reshape(-1, 3)

    ret, corners = cv2.findChessboardCorners(
        gray, (cols, rows), None, cv2.CALIB_CB_FILTER_QUADS)
    cv2.drawChessboardCorners(debug_img, (cols, rows), corners, ret)
    cv2.imshow('PreRefinement', debug_img)

    if ret:
        if K is None:
            print('K not initialized yet!')
            return
        corners2 = cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)
        corners2.shape = (corners2.shape[0], corners2.shape[-1])
        # Find the rotation and translation vectors.
        ret, rvecs, tvecs = cv2.solvePnP(
            objp, corners2, K, np.array([[0, 0, 0, 0]], dtype=np.float32))
        if ret:
            rodRotMat = cv2.Rodrigues(rvecs)
            tag_in_cam = np.eye(4)
            tag_in_cam[:3, :3] = rodRotMat[0]
            tag_in_cam[:3, 3] = tvecs[:, 0]
            # project 3D points to image plane
            imgpts, jac = cv2.projectPoints(
                axis, rvecs, tvecs, K, np.zeros((1, 4)))
            if visualize:
                cv2.drawChessboardCorners(
                    debug_img, (cols, rows), corners2, ret)
                img_msg = bridge.cv2_to_imgmsg(debug_img)
                img_msg.header.frame_id = 'cam'
                img_pub.publish(img_msg)
                K_msg.header.stamp = img_msg.header.stamp
                cam_info_pub.publish(K_msg)

                debug_img = draw(debug_img, corners2, imgpts)

                print(ret)

                cam_to_world = np.dot(body_to_world, cam_to_body)

                broadcaster.sendTransform(body_to_world[:3, 3],
                                          tf.transformations.quaternion_from_matrix(
                    body_to_world),
                    rospy.Time.now(),
                    'body',
                    "world")

                broadcaster.sendTransform(board_to_world[:3, 3],
                                          tf.transformations.quaternion_from_matrix(
                    board_to_world),
                    rospy.Time.now(),
                    'board',
                    "world")

                broadcaster.sendTransform(cam_to_body[:3, 3],
                                          tf.transformations.quaternion_from_matrix(
                    cam_to_body),
                    rospy.Time.now(),
                    'cam',
                    "body")

                broadcaster.sendTransform(tag_in_cam[:3, 3],
                                          tf.transformations.quaternion_from_matrix(
                    tag_in_cam),
                    rospy.Time.now(),
                    'tag',
                    "cam")

                broadcaster.sendTransform(tag_in_board[:3, 3],
                                          tf.transformations.quaternion_from_matrix(
                    tag_in_board),
                    rospy.Time.now(),
                    'tag_gt',
                    "board")
                # tag_in_cam = np.eye(4).astype(datatype)
                # # Now see if the 3D points projected make sense.

                # tag_pts = np.concatenate((objp, np.ones((objp.shape[0], 1))), axis=1).transpose()
                # tag_pts_in_world = np.dot(
                #     board_to_world, np.dot(tag_in_board, tag_pts))
                # tag_pts_in_cam = np.dot(np.linalg.inv(cam_to_world), tag_pts_in_world)

                # projections = np.dot(K, tag_pts_in_cam[:3, :])
                # projections /= projections[2]
                # projections = projections[:2].transpose()

                # pixels = []
                # Draw these pixels

                # pdb.set_trace()
            tag_in_cam_mocap_approx = np.dot(np.linalg.inv(
                cam_to_world), np.dot(board_to_world, tag_in_board))
            diff = np.dot(np.linalg.inv(tag_in_cam), tag_in_cam_mocap_approx)

            diff = se3.vector_from_algebra(SE3.algebra_from_group(diff))
            diffs_vector.append(diff)
            print(diff)
            print(np.linalg.norm(diff[:3]))
            # I'm curious to see the projected mocap frame in the image too
            pts = np.eye(4)
            pts[3, :] = 1

            origin_in_cam = np.dot(tag_in_cam_mocap_approx, pts)
            projections = np.dot(K, origin_in_cam[:3, :])
            projections /= projections[2]
            projections = projections.astype(int)  # cv2.line needs integer pixel coords
            debug_img = cv2.line(debug_img, tuple(projections[:2, 3]), tuple(
                projections[:2, 0]), (0, 0, 127), 1)
            debug_img = cv2.line(debug_img, tuple(projections[:2, 3]), tuple(
                projections[:2, 1]), (0, 127, 0), 1)
            debug_img = cv2.line(debug_img, tuple(projections[:2, 3]), tuple(
                projections[:2, 2]), (127, 0, 0), 1)
            cv2.imshow('img', debug_img)
            cv2.waitKey(10)

            data_tuples.append(
                [corners2, body_to_world, board_to_world, tag_in_cam])
            img_tuples.append(cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR))
Example 37
if use_bag:
    with rosbag.Bag(path, 'r') as bag:
        counter = 0
        for topic, msg, t in bag.read_messages(topics_to_parse):
            if topic in topics_to_parse:
                index = topics_to_parse.index(topic)
                subs[index].signalMessage(msg)
                counter += 1
                if counter % 1000 == 0:
                    print('Read {0} tuples'.format(counter))
    
    # Try to use a black-box optimizer
    print('Starting optimization...')
    from scipy.optimize import minimize
    initial_guess = np.array([0, 0, 0, 0, 0, 0, -0.1])  # initial guess is close to identity
    result = minimize(cost_function_tuple_offset, initial_guess,
                      bounds=np.array([[-1, 1], [-1, 1], [-1, 1], [-1, 1],
                                       [-1, 1], [-1, 1], [-1, 1]]))
    print('Done, result is')
    print(result)
    print(SE3.group_from_algebra(se3.algebra_from_vector(result.x[:6])))
    print(result.x[6])
    pdb.set_trace()

else:
    rospy.Subscriber(topics_to_parse[0], Image,
                     lambda msg: subs[0].signalMessage(msg))
    rospy.Subscriber(topics_to_parse[1], Odometry,
                     lambda msg: subs[1].signalMessage(msg))
    rospy.Subscriber(topics_to_parse[2], Odometry,
                     lambda msg: subs[2].signalMessage(msg))
    rospy.spin()
Example 38
def interpolate(pose1, pose2):
    return SE3.geodesic(pose1, pose2, 0.5)
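SE3.geodesic(pose1, pose2, t) returns the pose a fraction t along the geodesic from pose1 to pose2, so interpolate picks the midpoint. A short sketch, assuming the geometry package as above; the twist is arbitrary:

import numpy as np
from geometry import SE3, se3

p = SE3.identity()
q = SE3.group_from_algebra(se3.algebra_from_vector(
    np.array([0.4, 0.0, 0.0, 0.0, 0.0, 0.6])))
mid = interpolate(p, q)
SE3.belongs(mid)  # the midpoint is a valid SE(3) element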
Example 39
def convert(loc):
    loc = dict(**loc)
    loc["pose"] = SE3.to_yaml(loc["pose"])
    loc["observations"] = loc["observations"].tolist()
    return loc
Example 40
def get_joints_as_TSE3(vehicle_state):
    joints = []
    for js in vehicle_state['joints']:
        joints.append(SE3.from_yaml(js))
    return joints
Example 41
def servonav_episode(
    id_robot,
    robot,
    id_servo_agent,
    servo_agent,
    writer,
    id_episode,
    max_episode_len,
    save_robot_state,
    interval_write=1,
    interval_print=5,
    resolution=0.5,  # grid resolution
    delta_t_threshold=0.2,  # when to switch
    MIN_PATH_LENGTH=8,
    MAX_TIME_FOR_SWITCH=20.0,
    fail_if_not_working=False,
    max_tries=10000,
):
    """
    
        :arg:displacement: Time in seconds to displace the robot.
    """
    from geometry import SE2_from_SE3, translation_from_SE2, angle_from_SE2, SE3

    stats_write = InAWhile(interval_print)

    # Access the vehicleSimulation interface
    vsim = get_vsim_from_robot(robot)

    for _ in range(max_tries):
        # iterate until we can do this correctly
        episode = robot.new_episode()
        locations = get_grid(robot=robot, vsim=vsim, resolution=resolution)

        if len(locations) < MIN_PATH_LENGTH:
            logger.info("Path too short, trying again.")
        else:
            break

    else:
        msg = "Could not find path in %d tries." % max_tries
        raise Exception(msg)

    locations_yaml = convert_to_yaml(locations)

    vsim.vehicle.set_pose(locations[0]["pose"])

    current_goal = 1
    current_goal_obs = locations[current_goal]["observations"]
    servo_agent.set_goal_observations(current_goal_obs)

    counter = 0
    time_last_switch = None

    num_written = 0
    for robot_observations, boot_observations in run_simulation_servonav(
        id_robot,
        robot,
        id_servo_agent,
        servo_agent,
        100000,
        max_episode_len,
        id_episode=id_episode,
        id_environment=episode.id_environment,
        raise_error_on_collision=fail_if_not_working,
    ):

        current_time = boot_observations["timestamp"].item()
        if time_last_switch is None:
            time_last_switch = current_time

        time_since_last_switch = float(current_time - time_last_switch)

        def obs_distance(obs1, obs2):
            return float(np.linalg.norm(obs1.flatten() - obs2.flatten()))

        curr_pose = robot_observations.robot_pose
        curr_obs = boot_observations["observations"]
        curr_goal = locations[current_goal]["observations"]
        prev_goal = locations[current_goal - 1]["observations"]
        curr_err = obs_distance(curr_goal, curr_obs)
        prev_err = obs_distance(prev_goal, curr_obs)
        current_goal_pose = locations[current_goal]["pose"]
        current_goal_obs = locations[current_goal]["observations"]

        delta = SE2_from_SE3(SE3.multiply(SE3.inverse(curr_pose), current_goal_pose))
        delta_t = np.linalg.norm(translation_from_SE2(delta))
        delta_th = np.abs(angle_from_SE2(delta))

        if stats_write.its_time():
            msg = "  deltaT: %.2fm  deltaTh: %.1fdeg" % (delta_t, np.rad2deg(delta_th))
            logger.debug(msg)

        # If at the final goal, go closer
        is_final_goal = current_goal == len(locations) - 1
        if is_final_goal:
            delta_t_threshold *= 0.3

        # TODO: should we care also about delta_th?
        time_to_switch = (delta_t < delta_t_threshold) or (time_since_last_switch > MAX_TIME_FOR_SWITCH)
        # does not work: curr_err < SWITCH_THRESHOLD * prev_err:

        if time_to_switch:
            current_goal += 1
            logger.info("Switched to goal %d." % current_goal)

            time_last_switch = current_time
            if current_goal >= len(locations):
                # finished
                logger.info("Finished :-)")
                break

        threshold_lost_m = 3
        if delta_t > threshold_lost_m:
            msg = "Breaking because too far away."
            if not fail_if_not_working:
                logger.error(msg)
                break
            else:
                raise Exception(msg)

        servo_agent.set_goal_observations(current_goal_obs)

        extra = {}
        extra["servoing_base"] = dict(goal=curr_goal.tolist(), current=curr_obs.tolist())

        extra["servoing_poses"] = dict(goal=SE3.to_yaml(current_goal_pose), current=SE3.to_yaml(curr_pose))

        extra["servonav"] = dict(
            poseK=SE3.to_yaml(curr_pose),
            obsK=boot_observations["observations"].tolist(),
            pose1=SE3.to_yaml(current_goal_pose),
            locations=locations_yaml,
            current_goal=current_goal,
            curr_err=curr_err,
            prev_err=prev_err,
            time_last_switch=time_last_switch,
            time_since_last_switch=time_since_last_switch,
        )

        if counter % interval_write == 0:
            if save_robot_state:
                extra["robot_state"] = robot.get_state()

            writer.push_observations(observations=boot_observations, extra=extra)
            num_written += 1
        counter += 1

    if num_written == 0:
        msg = "This log was too short to be written (%d observations)" % counter
        raise Exception(msg)
Example 42
def pose_to_yaml(x):
    ''' Converts to YAML, or returns None. '''
    if x is None:
        return None
    else:
        return SE3.to_yaml(x)