Example #1
    #
    # To relay the Raspberry Pi camera topic under a new name:
    #
    # 	rosrun topic_tools relay /raspicam_node/image/compressed /kamera
    #

    recebedor = rospy.Subscriber(topico_imagem,
                                 CompressedImage,
                                 roda_todo_frame,
                                 queue_size=4,
                                 buff_size=2**24)
    print("Usando ", topico_imagem)

    velocidade_saida = rospy.Publisher("/cmd_vel", Twist, queue_size=1)

    try:

        while not rospy.is_shutdown():
            vel = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0))
            if len(media) != 0 and len(centro) != 0:
                print("Média dos vermelhos: {0}, {1}".format(
                    media[0], media[1]))
                print("Centro dos vermelhos: {0}, {1}".format(
                    centro[0], centro[1]))
                vel = Twist(Vector3(0, 0, 0), Vector3(0, 0, -0.1))
            velocidade_saida.publish(vel)
            rospy.sleep(0.1)

    except rospy.ROSInterruptException:
        print("Ocorreu uma exceção com o rospy")
Example #2
        # NOTE: this fragment starts mid-loop; the original guard condition was
        # truncated, so `t0 is None` stands in for "timer not started yet".
        if t0 is None:
            t0 = rospy.get_rostime()
            print("waiting for timer")
            continue
        t1 = rospy.get_rostime()
        elapsed = (t1 - t0)
        print("Elapsed ", elapsed.secs, " seconds")
        rospy.sleep(1.0)

        limite = 30
        ### Keep making successive adjustments to get closer and closer to the origin

        if elapsed.secs >= limite:
            print("Retonando a base de ", x, ",", y, ", ", math.degrees(alfa))
            ang = calcula_angulo(alfa, x, y)
            dist = calcula_dist(x, y)
            vel_rot = Twist(Vector3(0, 0, 0), Vector3(0, 0, max_angular))
            vel_trans = Twist(Vector3(max_linear, 0, 0), Vector3(0, 0, 0))
            zero = Twist(Vector3(0, 0, 0), Vector3(0, 0, 0))

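            # Open-loop dead reckoning: publish a constant twist, then sleep
            # just long enough to cover the computed angle/distance at the
            # configured maximum rates.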
            sleep_rot = abs(ang / max_angular)
            sleep_trans = abs(dist / max_linear)

            print(vel_rot, "\n", sleep_rot)
            velocidade_saida.publish(vel_rot)
            rospy.sleep(sleep_rot)

            print(vel_trans, "\n", sleep_trans)
            velocidade_saida.publish(vel_trans)
            rospy.sleep(sleep_trans)
            print("Terminou um ciclo")
            velocidade_saida.publish(zero)
Example #3
from action_step_marker import ActionStepMarker
from pr2_arm_control.msg import Side, GripperState
from pr2_pbd_interaction.msg import (ArmState, ActionStepSequence, ActionStep,
                                     ArmTarget, GripperAction, ArmTrajectory)

# ######################################################################
# Module level constants
# ######################################################################

# Filename related
FILENAME_BASE = 'Action'
DEFAULT_FILE_EXT = '.bag'

# Marker properties for little arrows drawn between consecutive steps.
LINK_MARKER_LIFETIME = rospy.Duration(2)
LINK_SCALE = Vector3(0.01, 0.03, 0.01)
LINK_COLOR = ColorRGBA(0.8, 0.8, 0.8, 0.3)  # sort of light gray

# ROS topics, etc.
TOPIC_MARKERS = 'visualization_marker_array'
TOPIC_BAG_SEQ = 'sequence'  # used when saving

# TODO(mbforbes): This should be in one module only.
BASE_LINK = 'base_link'

# ######################################################################
# Classes
# ######################################################################


class ProgrammedAction:
Example #4
        rospy.Subscriber("simu_send_euler_angles", Vector3, sub_euler)
        rospy.Subscriber("simu_send_wind_direction", Float32, sub_wind)
        rospy.Subscriber("simu_send_imu", Imu, sub_imu)

    else:
        rospy.Subscriber("filter_send_gps", GPSFix, sub_gps)
        rospy.Subscriber("filter_send_euler_angles", Vector3, sub_euler)
        rospy.Subscriber("filter_send_wind_direction", Float32, sub_wind)
        rospy.Subscriber("ardu_send_imu", Imu, sub_imu)

    listPoint, xRef = lectureCheckpoint(file)
    index = 0
    buoy = 0
    cubeA = Pose2D()
    cubeB = Pose2D()
    refmsgs = Vector3()
    refmsgs.x = xRef[0]
    refmsgs.y = xRef[1]
    wind = 0
    windFix = 0

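    # Block until a GPS fix is available and the boat is within 10 m of the
    # reference point (squared distance < 100), re-publishing the reference
    # position in the meantime.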
    gpsready = 0
    while gpsready == 0 or ((xgps[0] - xRef[0])**2 +
                            (xgps[1] - xRef[1])**2) > 100:
        time.sleep(0.01)
        pub_ref.publish(refmsgs)

    xStart[0], xStart[1] = xgps[0], xgps[1]
    print(xStart)

    rate = rospy.Rate(25)
Example #5
    def __init__(self):
        """ Initializing DDPG """
        self.ddpg = DDPG(a_dim=A_DIM,
                         s_dim=S_DIM,
                         batch_size=10,
                         memory_capacity=500)
        self.ep_reward = 0.0
        self.current_ep = 0
        self.current_step = 0
        self.current_action = np.array([.0, .0, .0])
        self.done = True  # if the episode is done
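        # Exploration noise standard deviation; decayed (x0.992) every other
        # step once the replay buffer holds more than TRAIN_POINT samples.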
        self.var = 5.0
        print("Initialized DDPG")
        """ Setting communication"""
        self.sub = rospy.Subscriber('robot_pose',
                                    PA,
                                    self.callback,
                                    queue_size=1)
        self.pub = rospy.Publisher('normalized_state', PC, queue_size=10)
        rospy.wait_for_service('airpress_control', timeout=5)
        self.target_PS = PS()
        self.action_V3 = Vector3()
        self.pc = PC()
        self.pc.header.frame_id = 'world'
        self.state = PA()
        self.updated = False  # if s is updated
        self.got_callback1 = False
        self.got_callback2 = False
        print("Initialized communication")
        """ Reading targets """
        """ The data should be w.r.t origin by base position """
        self.ends = pickle.load(open('./data/ends.p', 'rb'))
        self.x_offset = X_OFFSET
        self.y_offset = Y_OFFSET
        self.z_offset = Z_OFFSET
        self.scaler = 1 / 0.3
        self.sample_target()
        print("Read target data")

        #self.ddpg.restore_memory()
        #self.ddpg.restore_model()

        while not (rospy.is_shutdown()):
            if self.current_ep < MAX_EPISODES:
                if self.current_step < MAX_EP_STEPS:
                    while (not self.updated) & (not rospy.is_shutdown()):
                        rospy.sleep(0.1)
                    s = self.s.copy()

                    delta_a = self.ddpg.choose_action(s) * A_BOUND
                    print delta_a / A_BOUND
                    print("Delta action:")
                    print delta_a
                    delta_a = np.random.normal(delta_a, self.var)
                    print("Noise delta action: ")
                    print delta_a
                    self.current_action += delta_a
                    self.current_action = np.clip(self.current_action, 0, 25)
                    self.action_V3.x, self.action_V3.y, self.action_V3.z \
                        = self.current_action[0], self.current_action[1], self.current_action[2]
                    self.run_action(self.action_V3)
                    rospy.sleep(2.5)
                    print "Current action:"
                    print self.current_action

                    self.updated = False
                    while (not self.updated) & (not rospy.is_shutdown()):
                        rospy.sleep(0.1)
                    s_ = self.s.copy()
                    self.compute_reward(self.end, self.n_t)

                    action = delta_a / A_BOUND
                    print action
                    self.ddpg.store_transition(s, action, self.reward, s_)
                    # print("Experience stored")

                    if self.ddpg.pointer > TRAIN_POINT:
                        if (self.current_step % 2 == 0):
                            self.var *= 0.992
                            self.ddpg.learn()

                    self.current_step += 1
                    self.ep_reward += self.reward

                    if self.reward > GOT_GOAL:
                        self.done = True
                        self.current_step = 0
                        self.current_ep += 1
                        self.sample_target()
                        print "Target Reached"
                        print("Episode %i Ended" % self.current_ep)
                        print(
                            'Episode:',
                            self.current_ep,
                            ' Reward: %d' % self.ep_reward,
                            'Explore: %.2f' % self.var,
                        )
                        print('*' * 40)
                        self.ep_reward = 0
                        self.ddpg.save_memory()
                        self.ddpg.save_model()
                        """
                        self.current_action = np.array([.0, .0, .0])
                        self.action_V3.x, self.action_V3.y, self.action_V3.z \
                            = self.current_action[0], self.current_action[1], self.current_action[2]
                        self.run_action(self.action_V3)
                        """

                    else:
                        self.done = False
                        if self.current_step == MAX_EP_STEPS:
                            print "Target Failed"
                            print("Episode %i Ends" % self.current_ep)
                            print(
                                'Episode:',
                                self.current_ep,
                                ' Reward: %d' % self.ep_reward,
                                'Explore: %.2f' % self.var,
                            )
                            print('*' * 40)
                            self.current_step = 0
                            self.current_ep += 1
                            self.sample_target()
                            self.ep_reward = 0
                            self.ddpg.save_memory()
                            self.ddpg.save_model()
                            """
                            self.current_action = np.array([.0, .0, .0])
                            self.action_V3.x, self.action_V3.y, self.action_V3.z \
                                = self.current_action[0], self.current_action[1], self.current_action[2]
                            self.run_action(self.action_V3)
                            """
                    self.updated = False
                    print('\n')
Example #6
def drone_viz():
    rospy.init_node("viz")
    drones = []
    position_dict = {}
    waypoint_dict = {}
    pub_dict = {}
    rate = rospy.Rate(10)
    pub = rospy.Publisher("viz/box_markers", MarkerArray, queue_size=1)
    markers = {}
    waypoints = {}
    trajectories = {}
    marker_array = MarkerArray()
    global_id = 0
    while not rospy.is_shutdown():
        # on each fixed timestep: simulate the system and publish
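        # Discover drones dynamically: any node whose name starts with "drone"
        # gets position/waypoint subscribers registered exactly once.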
        nodes = rosnode.get_node_names()
        for node in nodes:
            node = node[1:]
            if node.startswith("drone") and node not in drones:
                drones.append(node)
                rospy.Subscriber("/" + node + "/position", Pose, callback,
                                 (node, position_dict))
                rospy.Subscriber("/" + node + "/waypoint", Float32MultiArray,
                                 callback, (node, waypoint_dict))
        t, lifetime = rospy.Time.now(), rospy.Duration()
        for i, drone in enumerate(drones):
            if drone not in position_dict: continue
            if drone not in markers:
                markers[drone] = Marker()
                marker = markers[drone]
                marker.header.frame_id = "world"
                marker.id = global_id
                global_id += 1
                marker.header.stamp = t
                marker.type = Marker.MESH_RESOURCE
                marker.mesh_resource = "package://drone_viz/src/drone.stl"
                marker.action = Marker.ADD
                marker.scale = Vector3(0.01, 0.01, 0.01)
                marker.color.g = 0.5
                marker.color.a = 1.0
                marker.lifetime = lifetime
                marker_array.markers.append(marker)
            marker = markers[drone]
            marker.pose = Pose()
            marker.pose.position.x = position_dict[drone].position.x
            marker.pose.position.y = position_dict[drone].position.y
            marker.pose.position.z = position_dict[drone].position.z
            pos_msg = position_dict[drone]
            euler = np.array(
                euler_from_quaternion([
                    pos_msg.orientation.x, pos_msg.orientation.y,
                    pos_msg.orientation.z, pos_msg.orientation.w
                ]))
            euler[0] += -np.pi / 2
            quat = quaternion_from_euler(euler[0], euler[1], euler[2])
            marker.pose.orientation.x = quat[0]
            marker.pose.orientation.y = quat[1]
            marker.pose.orientation.z = quat[2]
            marker.pose.orientation.w = quat[3]

            #waypoint
            if drone not in waypoint_dict: continue
            if drone not in waypoints:
                waypoints[drone] = Marker()
                marker = waypoints[drone]
                marker.header.frame_id = "world"
                marker.id = global_id
                global_id += 1
                marker.header.stamp = t
                marker.type = Marker.POINTS
                marker.action = Marker.ADD
                marker.scale = Vector3(0.1, 0.1, 0.1)
                marker.color.r = 1
                marker.color.a = 1.0
                marker.lifetime = lifetime
                marker_array.markers.append(marker)
            marker = waypoints[drone]
            curr_waypoint = waypoint_dict[drone].data
            marker_point = Point()
            marker_point.x = curr_waypoint[0]
            marker_point.y = curr_waypoint[1]
            marker_point.z = curr_waypoint[2]
            if not marker.points or marker.points[-1] != marker_point:
                marker.points.append(marker_point)

            #trajectories
            if drone not in trajectories:
                trajectories[drone] = Marker()
                marker = trajectories[drone]
                marker.header.frame_id = "world"
                marker.id = global_id
                global_id += 1
                marker.header.stamp = t
                marker.type = Marker.LINE_STRIP
                marker.action = Marker.ADD
                marker.scale = Vector3(0.1, 0.1, 0.1)
                marker.color.b = 1
                marker.color.a = 1.0
                marker.lifetime = lifetime
                marker_array.markers.append(marker)
            marker = trajectories[drone]
            curr_waypoint = waypoint_dict[drone].data
            marker_point = Point()
            marker_point.x = curr_waypoint[0]
            marker_point.y = curr_waypoint[1]
            marker_point.z = curr_waypoint[2]
            if not marker.points or marker.points[-1] != marker_point:
                marker.points.append(marker_point)

        pub.publish(marker_array)
        rate.sleep()
Example #7
 def listerner_callback(self, msg):
     cv_bridge = CvBridge()
     img = cv_bridge.imgmsg_to_cv2(msg)
     img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     angle = 0.365
     max_height = 350
     min_height = 200
     img = img[len(img) - max_height:len(img) - min_height]
     img = cv2.resize(img, (100, int((max_height - min_height) / 2)),
                      interpolation=cv2.INTER_AREA)
     img_width = len(img[0])
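     # Inverse-perspective correction: each row is cropped by an amount
     # proportional to its distance from the bottom (i * angle) and stretched
     # back to the full width.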
     for i in range(len(img)):
         left_cutoff = int(i * angle)
         right_cutoff = len(img[i]) - (left_cutoff)
         temp = np.zeros((1, right_cutoff - left_cutoff))
         temp[0] = img[len(img) - 1 - i][left_cutoff:right_cutoff]
         temp = cv2.resize(temp, (img_width, 1),
                           interpolation=cv2.INTER_AREA)
         img[len(img) - 1 - i] = temp[0]
     #end_to_start = img_width/(img_width-2*angle*(max_height-min_height))
     if (self.first):
         self.reference_scanline = self.scanLine(img)
     min_radius = 200
     searches = 50
     curr = -1 / min_radius
     max_added_width = int(
         math.ceil(math.sqrt(min_radius**2 - ((len(img) - 1)**2))))
     max_width = img_width + 2 * max_added_width
     values = np.zeros(searches)
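     # Sweep candidate curvatures in [-1/min_radius, +1/min_radius]: shift each
     # row along the candidate arc, score the straightened image, and keep the
     # best-scoring curvature as the turn radius below.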
     for i in range(searches):
         temp_image = np.zeros((len(img), max_width))
         if (curr == 0):
             shift = 0
             values[i] = self.score(img)
             continue
         rad = 1 / curr
         for j in range(len(img)):
             shift = int(abs(rad) - math.sqrt(rad**2 - j**2))
             if (rad < 0):
                 shift *= -1
             temp_image[len(img) - 1 -
                        j][max_added_width + shift:max_added_width + shift +
                           img_width] = img[len(img) - 1 - j]
         values[i] = self.score(temp_image)
         curr += (1 / min_radius) / (searches / 2)
     turn_radius = 0
     best = 0
     for i in range(searches):
         if (values[i] > best):
             best = values[i]
             turn_radius = (-1 / min_radius) + ((1 / min_radius) /
                                                (searches / 2)) * i
     shifted_best = np.zeros((len(img), max_width))
     if (turn_radius == 0):
         shifted_best = img
     else:
         rad = 1 / turn_radius
         for j in range(len(img)):
             shift = int(abs(rad) - math.sqrt(rad**2 - j**2))
             if (rad < 0):
                 shift *= -1
             shifted_best[len(img) - 1 -
                          j][max_added_width + shift:max_added_width +
                             shift + img_width] = img[len(img) - 1 - j]
     offset = self.offset_calc(self.scanLine(shifted_best))
     cv2.imshow("shifted_image", shifted_best)
     cv2.waitKey(0)
     #if(turn_radius != 0):
     #    turn_radius = 1/turn_radius
     self.get_logger().info('offset: "%f"' % offset)
     self.get_logger().info('turn_radius: "%f"' % turn_radius)
     twist_msg = Twist()
     twist_msg.angular = Vector3()
     x_speed = 0.5
     offset_r = offset * .03
     turn_radius_r = turn_radius / 0.03
     if (offset_r > 0):
         offset_r = min(offset_r, 0.3)
     else:
         offset_r = max(offset_r, -0.3)
     if (turn_radius_r > 0):
         turn_radius_r = min(turn_radius_r, 0.8)
     else:
         turn_radius_r = max(turn_radius_r, -0.8)
     if (abs(turn_radius) > 0.004):
         x_speed -= abs(turn_radius - 0.004) * 5
         if (offset_r * turn_radius_r < 0):
             if (offset_r > 0):
                 offset_r = min(offset_r, 0.15)
             else:
                 offset_r = max(offset_r, -0.15)
     twist_msg.angular = Vector3()
     twist_msg.angular.z = turn_radius_r + offset_r
     self.get_logger().info('turning: "%f"' % (turn_radius_r + offset_r))
     self.get_logger().info('turning: "%f"' % twist_msg.angular.z)
     twist_msg.linear = Vector3()
     twist_msg.linear.x = x_speed
     self.twist_pub.publish(twist_msg)
Example #8
    # Start time
    last_time = time.time()
    num_frames = 0
    while not rospy.is_shutdown():
        curr_time = time.time()
        # Capture frame-by-frame
        ret, frame = cap.read() 
        num_frames += 1
        if (display_original_image): # Display the image if param is set to true
            cv2.imshow('original', frame)
        # Gray scale image            
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Use the classifier to find faces
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        center=Vector3() 
        for index,(x,y,w,h) in enumerate(faces): # For each face publish the centroid
            #print("Frame is: %d by %d and x,y %d, %d" %(frame.shape[1],frame.shape[0],x,y))
            frame = cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2) # Draw a frame around each face
            # publish center of face
            center.x=x+w/2
            center.y=y+h/2
            center.z=index+1 # unique id per face (starts at 1; 0 means no face)
            pub.publish(center)
        if center.z == 0: # No faces found so aim at center
            center.x = 320 # This is the set Point TODO use a param
            pub.publish(center)
        if (display_tracking_image): # Display the image if param is set to true
            cv2.imshow('tracking', frame)
        #except Exception as e:
        #    print(e)
Example #9
def robot_control(msg):
    
    # CW (rotates right): negative
    # CCW (rotates left): positive
    
    left = msg.ranges[:360]
    front = msg.ranges[360:481]
    right = msg.ranges[481:]
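    # The scan is split into three sectors: left (indices 0-359),
    # front (360-480) and right (481 onward).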
        
    if min(left) < 0.9 and min(front) >= 1.0 :
        # Close to the left wall with a clear front:
        # go forward while steering back toward the wall
        message = Twist(
            Vector3(0.3, 0, 0),
            Vector3(0, 0, 0.45)
        )
        
    elif min(right) < 0.9 and min(front) >= 1.0 :
        # Close to the right wall with a clear front:
        # go forward while steering back toward the wall
        message = Twist(
            Vector3(0.3, 0, 0),
            Vector3(0, 0, -0.45)
        )
        
    elif min(left) < 0.5 :
        # Very close to the left wall:
        # only rotate away from it
        message = Twist(
            Vector3(0, 0, 0),
            Vector3(0, 0, -0.25)
        )
        
    elif min(right) < 0.5 :
        # Very close to the right wall:
        # only rotate away from it
        message = Twist(
            Vector3(0, 0, 0),
            Vector3(0, 0, 0.25)
        )
        
    elif min(left) < 2.0 and min(front) < 2.0 :
        # Approaching the wall:
        # slow down and rotate more aggressively
        message = Twist(
            Vector3(0.25, 0, 0),
            Vector3(0, 0, 0.65)
        )
        
    elif min(right) < 2.0 and min(front) < 2.0 :
        message = Twist(
            Vector3(0.35, 0, 0),
            Vector3(0, 0, -0.65)
        )
        
    else :
        # Move forward
        message = Twist(
            Vector3(0.55,0,0),
            Vector3(0,0,0)
        )
        
    pub.publish(message)
Example #10
def callback_pid(pose):
    global ukf_x, ukf_y, ukf_z, pid_x, pid_y, pid_z, pid_yaw, feedback_x, feedback_y, feedback_z, feedback_yaw, last_feedback_yaw, d_dyaw, dd_dyaw, direction
    global output_yaw, total_distance, odom_roll, odom_pitch, odom_yaw, init_yaw
    vel_control = TwistStamped()
    pos_control = PoseStamped()

    scale_z = 0.08
    scale_y = 0.4
    scale_yaw = 1

    scale_yaw = scale_yaw / total_distance

    pid_y.update(feedback_y)
    output_y = pid_y.output * scale_y
    feedback_y = ukf_y
    pid_y.SetPoint = pose.pose.position.y

    pid_z.update(feedback_z)
    output_z = -pid_z.output * scale_z
    feedback_z = ukf_z
    pid_z.SetPoint = pose.pose.position.z

    feedback_yaw = odom_yaw
    if feedback_yaw - last_feedback_yaw != 0:
        pid_yaw.update(feedback_yaw)
        output_yaw = pid_yaw.output * scale_yaw * direction
        #pid_yaw.SetPoint = pose.pose.orientation.z
        pid_yaw.SetPoint = dyaw + feedback_yaw

        print " yaw_set:", pid_yaw.SetPoint, " yaw_out:", output_yaw, " yaw_Feb:", feedback_yaw, "dfb:", abs(
            feedback_yaw) - abs(last_feedback_yaw)
    dfeedback = abs(feedback_yaw) - abs(last_feedback_yaw)
    if dfeedback < 0:
        d_dyaw = []
        dd_dyaw = []
    elif dfeedback > 0:
        d_dyaw.append(dfeedback)
        print "d_dyaw", d_dyaw
        if len(d_dyaw) == 3:
            if d_dyaw[2] > 0 and d_dyaw[1] > 0 and d_dyaw[0] > 0:
                dd_dyaw.append(d_dyaw[2] - d_dyaw[1])
                dd_dyaw.append(d_dyaw[1] - d_dyaw[0])
                print "dd_dyaw", dd_dyaw
                if dd_dyaw[1] > 0 and dd_dyaw[0] > 0:
                    direction = -direction
                    print "CHANGE!!!!!!!!"
            dd_dyaw = []
            d_dyaw = []
            # print  "dd_dyaw", dd_dyaw
            # #print dd_dyaw[1] - dd_dyaw[0]
            # if dd_dyaw[1] - dd_dyaw[0] > 0:
            #     direction = - direction
            #     print "CHANGE!!!!!!!!"
            # dd_dyaw = []
            # d_dyaw = []

    last_feedback_yaw = feedback_yaw

    setpoint = Vector3()
    setpoint.x = 0
    setpoint.y = output_y
    setpoint.z = output_z
    stamp = rospy.get_rostime()
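    # Command only a linear velocity plus an absolute yaw; all position and
    # acceleration fields (and the yaw rate) are masked out of the setpoint.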
    msg = PositionTarget(
        coordinate_frame=PositionTarget.FRAME_LOCAL_NED,
        type_mask=PositionTarget.IGNORE_PX + PositionTarget.IGNORE_PY +
        PositionTarget.IGNORE_PZ + PositionTarget.IGNORE_AFX +
        PositionTarget.IGNORE_AFY + PositionTarget.IGNORE_AFZ +
        PositionTarget.IGNORE_YAW_RATE,
        velocity=setpoint,
        yaw=feedback_yaw + output_yaw - np.pi / 2)
    msg.header.stamp = stamp
    position_pub.publish(msg)
Example #11
def create_vector3(xdata, ydata, zdata):
    v = Vector3()
    v.x = xdata
    v.y = ydata
    v.z = zdata
    return v
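
A minimal usage sketch (hypothetical; assumes only the standard geometry_msgs messages):

from geometry_msgs.msg import Twist

# Build a Twist command from two helper-built vectors:
# drive forward at 0.2 m/s while yawing at 0.5 rad/s.
cmd = Twist()
cmd.linear = create_vector3(0.2, 0.0, 0.0)
cmd.angular = create_vector3(0.0, 0.0, 0.5)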
Example #12
        delta_th = vth * dt

        x += delta_x
        y += delta_y
        th += delta_th

        # since all odometry is 6DOF we'll need a quaternion created from yaw
        odom_quat = tf.transformations.quaternion_from_euler(0, 0, th)

        # first, we'll publish the transform over tf
        odom_broadcaster.sendTransform((x, y, 0.), odom_quat, current_time,
                                       "base_footprint", "odom")

        # next, we'll publish the odometry message over ROS
        odom = Odometry()
        odom.header.stamp = current_time
        odom.header.frame_id = "odom"

        # set the position
        odom.pose.pose = Pose(Point(x, y, 0.), Quaternion(*odom_quat))

        # set the velocity
        odom.child_frame_id = "base_footprint"
        odom.twist.twist = Twist(Vector3(vx, vy, 0), Vector3(0, 0, vth))

        # publish the message
        odom_pub.publish(odom)

        last_time = current_time
        r.sleep()
Example #13
    def odom_callback(self, odom_msg):
        self.slow_vel_count += 1
        if (self.initialization):
            self.initialization = 0
            w_0 = 1. / self.M * np.ones((5, 1))
            self.gsf_obj = GSF(self.M, w_0)
            # Initialize the M EKF hypotheses on a plus-shaped set of small
            # position perturbations around the first odometry fix (assumes
            # self.M == 5, matching w_0 above); the last hypothesis sits
            # exactly on the fix and seeds x_prev/y_prev.
            orientation_quat = odom_msg.pose.pose.orientation
            orientation_euler = tf.transformations.euler_from_quaternion([
                orientation_quat.x, orientation_quat.y, orientation_quat.z,
                orientation_quat.w
            ])
            offsets = [(0.1, 0.0), (-0.1, 0.0), (0.0, 0.1), (0.0, -0.1),
                       (0.0, 0.0)]
            for i in range(self.M):
                # Establish x_0
                dx, dy = offsets[i]
                x_0 = np.empty([5, 1])
                x_0[0] = odom_msg.pose.pose.position.x + dx
                x_0[1] = odom_msg.pose.pose.position.y + dy
                x_0[2] = orientation_euler[2]
                x_0[3] = 0
                x_0[4] = 0
                if i == 4:
                    self.x_prev = x_0[0]
                    self.y_prev = x_0[1]

                # Establish P_0
                P_0 = 0.1 * np.identity(5)

                # Initialize EKFs
                self.gsf_obj.gsf_fill_dict(i, x_0, P_0)

            self.time_prev = odom_msg.header.stamp
            self.slow_time_prev = self.time_prev

        time_curr = odom_msg.header.stamp
        d = time_curr - self.time_prev
        delta_t = d.to_sec()

        # Establish u_k from imu_msg
        imu_msg = rospy.wait_for_message("/L01/imu_raw", Imu)
        u = np.empty([3, 1])
        u[0][0] = imu_msg.angular_velocity.z
        u[1][0] = imu_msg.linear_acceleration.x
        u[2][0] = imu_msg.linear_acceleration.y
        self.gsf_obj.gsf_predict(u, delta_t)

        # Establish y_k+1 from odom msg
        y = np.empty([3, 1])
        y[0][0] = odom_msg.pose.pose.position.x
        y[1][0] = odom_msg.pose.pose.position.y
        orientation_quat = odom_msg.pose.pose.orientation
        orientation_euler = tf.transformations.euler_from_quaternion([
            orientation_quat.x, orientation_quat.y, orientation_quat.z,
            orientation_quat.w
        ])
        y[2][0] = orientation_euler[2]

        # Can possibly create fake velocities here?
        if (self.slow_vel_count % 100 == 0):
            # Find Current values
            slow_time_curr = odom_msg.header.stamp
            x_curr = y[0]
            y_curr = y[1]
            slow_time_delta = slow_time_curr - self.slow_time_prev
            delta_t_slow = slow_time_delta.to_sec()

            # Find velocity estimates
            x_vel = (x_curr - self.x_prev) / delta_t_slow
            y_vel = (y_curr - self.y_prev) / delta_t_slow
            y_new = np.empty([5, 1])
            y_new[0] = y[0]
            y_new[1] = y[1]
            y_new[2] = y[2]
            y_new[3] = x_vel
            y_new[4] = y_vel
            # Set previous values to current values
            self.x_prev = x_curr
            self.y_prev = y_curr
            self.slow_time_prev = slow_time_curr
            y = y_new

        # Given measurements, correct x_hat estimate
        self.gsf_obj.gsf_correct(y, delta_t)

        # Get and publish MMSE
        odom_output = Odometry()
        odom_output.header.stamp = rospy.Time.now()
        odom_output.header.frame_id = self.frame_id

        mu_k_plus_one_plus = self.gsf_obj.get_mu()
        self.x_store = np.append(self.x_store, mu_k_plus_one_plus, axis=1)
        rotation_quat = tf.transformations.quaternion_from_euler(
            0, 0, mu_k_plus_one_plus[2])
        odom_output.pose.pose = Pose(
            Point(mu_k_plus_one_plus[0], mu_k_plus_one_plus[1], 0),
            Quaternion(*rotation_quat))
        odom_output.twist.twist = Twist(
            Vector3(mu_k_plus_one_plus[3], mu_k_plus_one_plus[4], 0),
            Vector3(0, 0, u[0]))

        self.filter_output_pub.publish(odom_output)
        Sigma_k_plus_one_plus = self.gsf_obj.get_sigma()

        self.P_store = np.append(self.P_store,
                                 Sigma_k_plus_one_plus.reshape(25, 1),
                                 axis=1)
        P_msg = Float64MultiArray()
        P_msg.layout.dim.append(MultiArrayDimension())
        P_msg.layout.dim[0].size = 5
        P_msg.layout.dim[0].stride = 25
        P_msg.layout.dim.append(MultiArrayDimension())
        P_msg.layout.dim[1].size = 5
        P_msg.layout.dim[1].stride = 5
        P_msg.data = Sigma_k_plus_one_plus.reshape(25)

        # rospy.loginfo("Publishing y")
        self.P_publish.publish(P_msg)

        self.time_prev = time_curr
        if (self.slow_vel_count == 22000):
            #print "Saving to ekf.mat"
            #scipy.io.savemat('ekf',{'x': self.x_store, 'P': self.P_store})
            self.odom_sub.unregister()
            print "Save to csv"
            np.savetxt("/home/kyle/catkin_ws/src/ASEN_Project/bag/gsf_x.csv",
                       self.x_store,
                       delimiter=",")
            np.savetxt("/home/kyle/catkin_ws/src/ASEN_Project/bag/gsf_P.csv",
                       self.P_store,
                       delimiter=",")
            print "Save complete"
            rospy.signal_shutdown("Ended GSF")
Example #14
def read_cb(data):
    global ang_min
    global a_inc
    global radius
    global rmin
    global rmax
    global prev_x
    global prev_y
    global prev_z

    # define constants
    leg_size_min = 0.05
    leg_size_max = 0.15
    depth_min = 0.01
    depth_max = 0.07
    connect_dist = 0.05

    # reset feature variables
    angle = []
    far = 0
    vector = Vector3()
    temp_i = 0
    temp_f = 0
    clear_flag = 0
    leg = []
    leg_c = 0

    # ensure laser variables are up to date
    ang_min = data.angle_min
    ang_max = data.angle_max
    a_inc = data.angle_increment

    # initialise scan data to be mutable object
    new_scan = list(data.ranges)
    print('New Leg')

    # reject invalid data and initialise new data to be processed
    for d, data_l in enumerate(data.ranges):
        angle.append(ang_min + d * a_inc)
        if data_l < rmin or data_l > rmax or math.isnan(data_l):
            new_scan[d] = float('nan')

    # segment the scan and search each contiguous segment for leg-shaped features
    for c, scan in enumerate(new_scan):
        if c == (len(new_scan) - 1):
            break
        if abs(new_scan[c] - new_scan[c + 1]) < connect_dist:
            if temp_i == 0:
                temp_i = c
            continue
        elif temp_i != 0:
            temp_f = c
            clear_flag = 1
        if temp_f - temp_i > 5:
            print('Entry {:f} and {:f}'.format(angle[temp_i], angle[temp_f]))
            if leg_size_min < polar_dist(new_scan[temp_i], angle[temp_i],
                                         new_scan[temp_f],
                                         angle[temp_f]) < leg_size_max:
                mid = int(math.ceil((temp_f - temp_i) / 2) + temp_i)
                if depth_min < (new_scan[temp_i] -
                                new_scan[mid]) < depth_max and depth_min < (
                                    new_scan[temp_f] -
                                    new_scan[mid]) < depth_max:
                    ini_angle = round(180 / math.pi * (angle[temp_i]), 3)
                    fin_angle = round(180 / math.pi * (angle[temp_f]), 3)
                    leg.append([angle[mid], new_scan[mid]])
                    print('Leg between {:f} and {:f}'.format(
                        ini_angle, fin_angle))
        if clear_flag == 1:
            temp_i = 0
            temp_f = 0
            clear_flag = 0

    # convert legs to useful information
    # leg[0][i] is the first leg entry discovered starting from negative angles
    # leg[i][0] is the angle of corresponding leg
    # leg[i][1] is the distance of corresponding leg
    if len(leg) == 0:
        if prev_x != 0 and prev_y != 0:
            x_leg = prev_x
            y_leg = prev_y
            z_leg = prev_z + 1
        else:
            x_leg = 0
            y_leg = 0
            z_leg = 0
    elif len(leg) == 1:
        x_leg = leg[0][1] * math.cos(leg[0][0])
        y_leg = leg[0][1] * math.sin(leg[0][0])
        z_leg = 1
    elif len(leg) == 2:
        if leg[1][0] - leg[0][0] < 15:
            x_leg = (leg[0][1] + leg[1][1]) / 2 * math.cos(
                (leg[0][0] + leg[1][0]) / 2)
            y_leg = (leg[0][1] + leg[1][1]) / 2 * math.sin(
                (leg[0][0] + leg[1][0]) / 2)
            z_leg = 1
        elif abs(leg[0][0]) < abs(leg[1][0]):
            x_leg = leg[0][1] * math.cos(leg[0][0])
            y_leg = leg[0][1] * math.sin(leg[0][0])
            z_leg = 1
        else:
            x_leg = leg[1][1] * math.cos(leg[1][0])
            y_leg = leg[1][1] * math.sin(leg[1][0])
            z_leg = 1
    else:
        if prev_x != 0 and prev_y != 0:
            x_leg = prev_x
            y_leg = prev_y
            z_leg = prev_z + 1
        else:
            x_leg = 0
            y_leg = 0
            z_leg = 0

    vector.x = x_leg
    vector.y = y_leg
    vector.z = z_leg
    prev_x = x_leg
    prev_y = y_leg
    prev_z = z_leg

    leg_pub.publish(vector)
Example #15
 def velocity(self):
     k = list(self.k.values())
     if not k:
         return Vector3()
     v = Vector3(k[0], k[1], k[2])
     return v
Example #16
    markers.publishPath(path, 'orange', width, 5.0) # path, color, width, lifetime


    # Plane / Rectangle:

    # Publish a rectangle between two points (thin, planar surface)
    # If the z-values are different, this will produce a cuboid
    point1 = Point(-1,0,0)
    point2 = Point(-2,-1,0) 
    markers.publishRectangle(point1, point2, 'black', 5.0)

    # Text:

    # Publish some text using a ROS Pose Msg
    P = Pose(Point(0,4,0),Quaternion(0,0,0,1))
    scale = Vector3(1,1,1)
    markers.publishText(P, 'ARTIV Visualization', 'white', scale, 5.0) # pose, text, color, scale, lifetime


    # Cube / Cuboid:

    # Publish a cube using a numpy transform matrix
    T = transformations.translation_matrix((-3,2.2,0))
    cube_width = 0.5 # cube is 0.5x0.5x0.5
    markers.publishCube(T, 'green', cube_width, 5.0) # pose, color, cube_width, lifetime

    center = Point(-3,2.2, 0.5)
    P = Pose(center,Quaternion(0,0,0,1))
    scale = Vector3(0.15,0.15,0.2)
    markers.publishText(P, 'E1', 'white', scale, 5.0) # pose, text, color, scale, lifetime
Example #17
    x_mission_lambert, y_mission_lambert = PROJECT(longitude_mission,
                                                   latitude_mission)

    return x_lambert, y_lambert, x_mission_lambert, y_mission_lambert


rospy.init_node('convert2Lambert')

input_GPS_msg = rospy.get_param('Input_GPS_msg', 'nav')
input_yaw_msg = rospy.get_param('Input_yaw_msg', 'imu_attitude')

output_msg = rospy.get_param('Output_msg', 'gps_angle_boat')

sub = rospy.Subscriber(input_GPS_msg, NavSatFix, gpsCallback)
sub = rospy.Subscriber(input_yaw_msg, Vector3Stamped, angleCallback)
pub = rospy.Publisher(output_msg, Twist, queue_size=10)

rate = rospy.Rate(25)

pose = Twist()

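# The Twist message is used as a plain container here: linear x/y carry the
# Lambert coordinates and angular z carries the boat's yaw.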
while not rospy.is_shutdown():

    x_lambert, y_lambert, x_mission_lambert, y_mission_lambert = convert()

    pose = Twist(Vector3(x_lambert, y_lambert, 0), Vector3(0, 0, yaw))

    pub.publish(pose)

    rate.sleep()
Example #18
    def fuse_head_position_rf(self):
        if self.fusion_mode == 'both':
            # `with a, b:` acquires both locks ("a and b" would acquire only one)
            with self.lock_right_cam_user_head_position, self.lock_left_cam_user_head_position:
                if (self.last_right_cam_user_head_certainty
                        == 0.0) and (self.last_left_cam_user_head_certainty
                                     == 0.0):
                    self.last_user_head_x = 0.0
                    self.last_user_head_y = 0.0
                    self.last_user_head_z = 0.0
                    self.last_user_head_certainty = 0.0
                elif (self.last_right_cam_user_head_certainty == 0.0):
                    self.last_user_head_x = self.last_left_cam_user_head_x
                    self.last_user_head_y = self.last_left_cam_user_head_y
                    self.last_user_head_z = self.last_left_cam_user_head_z
                    self.last_user_head_certainty = self.last_left_cam_user_head_certainty
                elif (self.last_left_cam_user_head_certainty == 0.0):
                    self.last_user_head_x = self.last_right_cam_user_head_x
                    self.last_user_head_y = self.last_right_cam_user_head_y
                    self.last_user_head_z = self.last_right_cam_user_head_z
                    self.last_user_head_certainty = self.last_right_cam_user_head_certainty
                else:
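                    # Both cameras report a valid estimate: fuse them with a
                    # certainty-weighted average.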
                    _sum = self.last_right_cam_user_head_certainty + self.last_left_cam_user_head_certainty
                    self.last_user_head_x = (
                        self.last_right_cam_user_head_certainty *
                        self.last_right_cam_user_head_x +
                        self.last_left_cam_user_head_certainty *
                        self.last_left_cam_user_head_x) / _sum
                    self.last_user_head_y = (
                        self.last_right_cam_user_head_certainty *
                        self.last_right_cam_user_head_y +
                        self.last_left_cam_user_head_certainty *
                        self.last_left_cam_user_head_y) / _sum
                    self.last_user_head_z = (
                        self.last_right_cam_user_head_certainty *
                        self.last_right_cam_user_head_z +
                        self.last_left_cam_user_head_certainty *
                        self.last_left_cam_user_head_z) / _sum
                    self.last_user_head_certainty = (
                        0.5 * self.last_right_cam_user_head_certainty +
                        0.5 * self.last_left_cam_user_head_certainty)
        elif self.fusion_mode == 'right':
            with self.lock_right_cam_user_head_position:
                self.last_user_head_x = self.last_right_cam_user_head_x
                self.last_user_head_y = self.last_right_cam_user_head_y
                self.last_user_head_z = self.last_right_cam_user_head_z
                self.last_user_head_certainty = self.last_right_cam_user_head_certainty
        elif self.fusion_mode == 'left':
            with self.lock_left_cam_user_head_position:
                self.last_user_head_x = self.last_left_cam_user_head_x
                self.last_user_head_y = self.last_left_cam_user_head_y
                self.last_user_head_z = self.last_left_cam_user_head_z
                self.last_user_head_certainty = self.last_left_cam_user_head_certainty

        # publish head position in rf
        vwc = VectorWithCertainty()
        vwc.certainty = self.last_user_head_certainty
        _vec = Vector3()
        _vec.x = self.last_user_head_x
        _vec.y = self.last_user_head_y
        _vec.z = self.last_user_head_z
        vwc.position = _vec
        self.fused_user_head_rf_pub.publish(vwc)
Example #19
        try:
            (trans,rot) = listener.lookupTransform('world', 'motion_shield', rospy.Time(0))
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            continue

        init_object_quat = tf.transformations.quaternion_from_euler(math.pi/2,0,0)
        init_object_pos = Point(-0.215, 0.543, 0)


        curr_quat = tf.transformations.quaternion_multiply(init_object_quat, rot)
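        # Note: curr_quat is computed but unused below; the marker pose takes
        # the raw transform rotation `rot` directly.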




        marker = Marker(
                        type=Marker.MESH_RESOURCE,
                        action=Marker.ADD,
                        id=0,
                        #lifetime=rospy.Duration(1.5),
                        #pose=Pose(Point(-0.215, 0.543, 0), Quaternion(init_quat[0], init_quat[1], init_quat[2], init_quat[3])),

                        pose=Pose(Point(-0.215, 0.543, 0), Quaternion(rot[0], rot[1], rot[2], rot[3])),
                        scale=Vector3(0.001, .001, .001),
                        header=Header(frame_id='world'),
                        color=ColorRGBA(0.0, 1.0, 0.0, 0.8),
                        mesh_resource="file:///home/nazir/ws_moveit/src/ur10_cm/models/right_cone.dae"
                        )

        marker_pub.publish(marker)
        rate.sleep()
Example #20
    def fuse_gaze_point_wf(self):
        if self.fusion_mode == 'both':
            with self.lock_right_cam_user_gaze_point, self.lock_left_cam_user_gaze_point:
                if (self.last_right_cam_gaze_point_and_direction.certainty
                        == 0.0
                    ) and (
                        self.last_left_cam_gaze_point_and_direction.certainty
                        == 0.0):
                    self.last_gaze_point_wf = Vector3(0, 0, 0)
                    self.last_head_position_wf = Vector3(0, 0, 0)
                    self.last_hfv_wf = Vector3(0, 0, 0)
                    self.last_gaze_point_certainty = 0.0
                elif (self.last_right_cam_user_head_certainty == 0.0):
                    self.last_gaze_point_wf = self.last_left_cam_gaze_point_and_direction.gaze_point
                    self.last_head_position_wf = self.last_left_cam_gaze_point_and_direction.head_position
                    self.last_hfv_wf = self.last_left_cam_gaze_point_and_direction.hfv
                    self.last_gaze_point_certainty = self.last_left_cam_gaze_point_and_direction.certainty
                elif (self.last_left_cam_user_head_certainty == 0.0):
                    self.last_gaze_point_wf = self.last_right_cam_gaze_point_and_direction.gaze_point
                    self.last_head_position_wf = self.last_right_cam_gaze_point_and_direction.head_position
                    self.last_hfv_wf = self.last_right_cam_gaze_point_and_direction.hfv
                    self.last_gaze_point_certainty = self.last_right_cam_gaze_point_and_direction.certainty
                else:
                    _fused_vec3 = Vector3()
                    # gaze point
                    _sum = self.last_right_cam_gaze_point_and_direction.certainty + self.last_left_cam_gaze_point_and_direction.certainty
                    _fused_vec3.x = (
                        self.last_right_cam_gaze_point_and_direction.certainty
                        * self.last_right_cam_gaze_point_and_direction.
                        gaze_point.x +
                        self.last_left_cam_gaze_point_and_direction.certainty *
                        self.last_left_cam_gaze_point_and_direction.gaze_point.
                        x) / _sum
                    _fused_vec3.y = (
                        self.last_right_cam_gaze_point_and_direction.certainty
                        * self.last_right_cam_gaze_point_and_direction.
                        gaze_point.y +
                        self.last_left_cam_gaze_point_and_direction.certainty *
                        self.last_left_cam_gaze_point_and_direction.gaze_point.
                        y) / _sum
                    _fused_vec3.z = (
                        self.last_right_cam_gaze_point_and_direction.certainty
                        * self.last_right_cam_gaze_point_and_direction.
                        gaze_point.z +
                        self.last_left_cam_gaze_point_and_direction.certainty *
                        self.last_left_cam_gaze_point_and_direction.gaze_point.
                        z) / _sum
                    self.last_gaze_point_wf = _fused_vec3
                    # head position in wf (fresh Vector3: reusing the same
                    # object would alias gaze point, head position and hfv)
                    _fused_vec3 = Vector3()
                    _fused_vec3.x = (
                        self.last_right_cam_gaze_point_and_direction.certainty
                        * self.last_right_cam_gaze_point_and_direction.
                        head_position.x +
                        self.last_left_cam_gaze_point_and_direction.certainty *
                        self.last_left_cam_gaze_point_and_direction.
                        head_position.x) / _sum
                    _fused_vec3.y = (
                        self.last_right_cam_gaze_point_and_direction.certainty
                        * self.last_right_cam_gaze_point_and_direction.
                        head_position.y +
                        self.last_left_cam_gaze_point_and_direction.certainty *
                        self.last_left_cam_gaze_point_and_direction.
                        head_position.y) / _sum
                    _fused_vec3.z = (
                        self.last_right_cam_gaze_point_and_direction.certainty
                        * self.last_right_cam_gaze_point_and_direction.
                        head_position.z +
                        self.last_left_cam_gaze_point_and_direction.certainty *
                        self.last_left_cam_gaze_point_and_direction.
                        head_position.z) / _sum
                    self.last_head_position_wf = _fused_vec3
                    # hfv (again a fresh Vector3)
                    _fused_vec3 = Vector3()
                    _fused_vec3.x = (
                        self.last_right_cam_gaze_point_and_direction.certainty
                        * self.last_right_cam_gaze_point_and_direction.hfv.x +
                        self.last_left_cam_gaze_point_and_direction.certainty *
                        self.last_left_cam_gaze_point_and_direction.hfv.x
                    ) / _sum
                    _fused_vec3.y = (
                        self.last_right_cam_gaze_point_and_direction.certainty
                        * self.last_right_cam_gaze_point_and_direction.hfv.y +
                        self.last_left_cam_gaze_point_and_direction.certainty *
                        self.last_left_cam_gaze_point_and_direction.hfv.y
                    ) / _sum
                    _fused_vec3.z = (
                        self.last_right_cam_gaze_point_and_direction.certainty
                        * self.last_right_cam_gaze_point_and_direction.hfv.z +
                        self.last_left_cam_gaze_point_and_direction.certainty *
                        self.last_left_cam_gaze_point_and_direction.hfv.z
                    ) / _sum
                    self.last_hfv_wf = _fused_vec3
                    # certainty
                    self.last_gaze_point_certainty = (
                        0.5 *
                        self.last_right_cam_gaze_point_and_direction.certainty
                        + 0.5 *
                        self.last_left_cam_gaze_point_and_direction.certainty)
        elif self.fusion_mode == 'right':
            with self.lock_right_cam_user_gaze_point:
                self.last_gaze_point_wf = self.last_right_cam_gaze_point_and_direction.gaze_point
                self.last_head_position_wf = self.last_right_cam_gaze_point_and_direction.head_position
                self.last_hfv_wf = self.last_right_cam_gaze_point_and_direction.hfv
                self.last_gaze_point_certainty = self.last_right_cam_gaze_point_and_direction.certainty
        elif self.fusion_mode == 'left':
            with self.lock_left_cam_user_gaze_point:
                self.last_gaze_point_wf = self.last_left_cam_gaze_point_and_direction.gaze_point
                self.last_head_position_wf = self.last_left_cam_gaze_point_and_direction.head_position
                self.last_hfv_wf = self.last_left_cam_gaze_point_and_direction.hfv
                self.last_gaze_point_certainty = self.last_left_cam_gaze_point_and_direction.certainty

        # publish gaze point and direction in wf
        gpd = GazePointAndDirection()
        # certainties for gaze point and head position should be the same
        gpd.certainty = self.last_gaze_point_certainty
        gpd.gaze_point = self.last_gaze_point_wf
        gpd.head_position = self.last_head_position_wf
        gpd.hfv = self.last_hfv_wf
        gpd.role_confidence = self.last_left_cam_gaze_point_and_direction.role_confidence
        gpd.role = gpd.CHILD_ROLE

        self.fused_gaze_point_wf_pub.publish(gpd)
Example #21
 def __init__(self):
     super(DistributedTFNode, self).__init__(
         node_name='distributed_tf_node',
         node_type=NodeType.SWARM
     )
     # get static parameter - `~veh`
     try:
         self.robot_hostname = rospy.get_param('~veh')
     except KeyError:
         self.logerr('The parameter ~veh was not set, the node will abort.')
         exit(1)
     # get static parameter - `~map`
     try:
         self.map_name = rospy.get_param('~map')
     except KeyError:
         self.logerr('The parameter ~map was not set, the node will abort.')
         exit(2)
     # make sure the map exists
     maps = dw.list_maps()
     if self.map_name not in maps:
         self.logerr(f"Map `{self.map_name}` not found in "
                     f"duckietown-world=={dw.__version__}. "
                     f"The node will abort.")
         exit(2)
     self._map = dw.load_map(self.map_name)
     # create communication group
     self._group = DTCommunicationGroup("/autolab/tf", AutolabTransform)
     # create TF between the /world frame and the origin of this map
     self._world_to_map_origin_tf = AutolabTransform(
         origin=AutolabReferenceFrame(
             time=rospy.Time.now(),
             type=AutolabReferenceFrame.TYPE_WORLD,
             name="world",
             robot="*"
         ),
         target=AutolabReferenceFrame(
             time=rospy.Time.now(),
             type=AutolabReferenceFrame.TYPE_MAP_ORIGIN,
             name="map",
             robot=self.robot_hostname
         ),
         is_fixed=True,
         is_static=False,
         transform=Transform(
             translation=Vector3(x=0, y=0, z=0),
             rotation=Quaternion(x=0, y=0, z=0, w=1)
         )
     )
     # create static tfs holder and access semaphore
     self._static_tfs = [
         self._world_to_map_origin_tf
     ]
     # add TFs from ground tags
     self._static_tfs.extend(self._get_tags_tfs())
     # create publisher
     self._tf_pub = self._group.Publisher()
     # publish right away and then set a timer
     self._publish_tfs()
     self._pub_timer = rospy.Timer(
         rospy.Duration(self.PUBLISH_TF_STATIC_EVERY_SECS),
         self._publish_tfs
     )
Example #22
    def fuse_gaze_point_wf_2(self):
        if self.fusion_mode == 'both':
            with self.lock_right_cam_user_gaze_point, self.lock_left_cam_user_gaze_point:
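                # Unlike the weighted-average fusion above, this variant simply
                # adopts whichever camera reports the higher certainty.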
                if (self.last_right_cam_gaze_point_and_direction.certainty
                        == 0.0
                    ) and (
                        self.last_left_cam_gaze_point_and_direction.certainty
                        == 0.0):
                    self.last_gaze_point_wf = Vector3(0, 0, 0)
                    self.last_head_position_wf = Vector3(0, 0, 0)
                    self.last_hfv_wf = Vector3(0, 0, 0)
                    self.last_gaze_point_certainty = 0.0
                elif (self.last_right_cam_user_head_certainty == 0.0 or
                      self.last_right_cam_gaze_point_and_direction.certainty <=
                      self.last_left_cam_gaze_point_and_direction.certainty):
                    self.last_gaze_point_wf = self.last_left_cam_gaze_point_and_direction.gaze_point
                    self.last_head_position_wf = self.last_left_cam_gaze_point_and_direction.head_position
                    self.last_hfv_wf = self.last_left_cam_gaze_point_and_direction.hfv
                    self.last_gaze_point_certainty = self.last_left_cam_gaze_point_and_direction.certainty
                elif (self.last_left_cam_user_head_certainty == 0.0
                      or self.last_right_cam_gaze_point_and_direction.certainty
                      > self.last_left_cam_gaze_point_and_direction.certainty):
                    self.last_gaze_point_wf = self.last_right_cam_gaze_point_and_direction.gaze_point
                    self.last_head_position_wf = self.last_right_cam_gaze_point_and_direction.head_position
                    self.last_hfv_wf = self.last_right_cam_gaze_point_and_direction.hfv
                    self.last_gaze_point_certainty = self.last_right_cam_gaze_point_and_direction.certainty
        elif self.fusion_mode == 'right':
            with self.lock_right_cam_user_gaze_point:
                self.last_gaze_point_wf = self.last_right_cam_gaze_point_and_direction.gaze_point
                self.last_head_position_wf = self.last_right_cam_gaze_point_and_direction.head_position
                self.last_hfv_wf = self.last_right_cam_gaze_point_and_direction.hfv
                self.last_gaze_point_certainty = self.last_right_cam_gaze_point_and_direction.certainty
        elif self.fusion_mode == 'left':
            with self.lock_left_cam_user_gaze_point:
                self.last_gaze_point_wf = self.last_left_cam_gaze_point_and_direction.gaze_point
                self.last_head_position_wf = self.last_left_cam_gaze_point_and_direction.head_position
                self.last_hfv_wf = self.last_left_cam_gaze_point_and_direction.hfv
                self.last_gaze_point_certainty = self.last_left_cam_gaze_point_and_direction.certainty
                # NOTE: parent estimates come from the left camera only, so they
                # are refreshed solely in 'left' mode
                self.parent_last_gaze_point_wf = self.parent_last_left_cam_gaze_point_and_direction.gaze_point
                self.parent_last_head_position_wf = self.parent_last_left_cam_gaze_point_and_direction.head_position
                self.parent_last_hfv_wf = self.parent_last_left_cam_gaze_point_and_direction.hfv
                self.parent_last_gaze_point_certainty = self.parent_last_left_cam_gaze_point_and_direction.certainty

        # publish gaze point and direction in wf
        gpd = GazePointAndDirection()
        # certainties for gaze point and head position should be the same
        gpd.certainty = self.last_gaze_point_certainty
        gpd.gaze_point = self.last_gaze_point_wf
        gpd.head_position = self.last_head_position_wf
        gpd.hfv = self.last_hfv_wf
        # NOTE: role confidence is always taken from the left camera, regardless of fusion mode
        gpd.role_confidence = self.last_left_cam_gaze_point_and_direction.role_confidence
        gpd.role = gpd.CHILD_ROLE

        # build the parent's gaze point and direction in wf
        parent_gpd = GazePointAndDirection()
        # certainties for gaze point and head position should be the same
        parent_gpd.certainty = self.parent_last_gaze_point_certainty
        parent_gpd.gaze_point = self.parent_last_gaze_point_wf
        parent_gpd.head_position = self.parent_last_head_position_wf
        parent_gpd.hfv = self.parent_last_hfv_wf
        parent_gpd.role_confidence = self.parent_last_left_cam_gaze_point_and_direction.role_confidence
        parent_gpd.role = parent_gpd.PARENT_ROLE

        gpd_message = GazePointsAndDirections()
        gpd_message.gazes.append(gpd)
        gpd_message.gazes.append(parent_gpd)

        #print "[py] gpd certainty %2f gaze point x %2f gaze point y %2f gaze point z %2f" % (gpd.certainty, gpd.gaze_point.x, gpd.gaze_point.y, gpd.gaze_point.z)
        self.fused_gaze_point_wf_pub.publish(gpd_message)
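For context, a minimal consumer of the fused topic might look like the sketch below; the topic name `fused_gaze_point_wf` and the message package `gaze_msgs` are assumptions, since the excerpt does not show where `fused_gaze_point_wf_pub` is created.

import rospy
from gaze_msgs.msg import GazePointsAndDirections  # assumed package name

def on_gazes(msg):
    # pick whichever estimate (child or parent) carries the higher certainty
    best = max(msg.gazes, key=lambda g: g.certainty)
    rospy.loginfo("role=%d certainty=%.2f point=(%.2f, %.2f, %.2f)",
                  best.role, best.certainty,
                  best.gaze_point.x, best.gaze_point.y, best.gaze_point.z)

rospy.init_node("gaze_consumer")
rospy.Subscriber("fused_gaze_point_wf", GazePointsAndDirections, on_gazes)  # assumed topic
rospy.spin()
Ejemplo n.º 23
0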
def readXML(file):
    tree = ET.parse(file)
    root = tree.getroot()

    item = root.findall('./tracklets/item')

    d = {}
    boxes_2d = {}
    pictograms = {}

    for i, v in enumerate(item):
        h = float(v.find('h').text)
        w = float(v.find('w').text)
        l = float(v.find('l').text)
        frame = int(v.find('first_frame').text)
        size = Vector3(l, w, h)

        label = v.find('objectType').text

        pose = v.findall('./poses/item')

        for j, p in enumerate(pose):
            tx = float(p.find('tx').text)
            ty = float(p.find('ty').text)
            tz = float(p.find('tz').text)
            rz = float(p.find('rz').text)
            occlusion = float(p.find('occlusion').text)
            q = tf.transformations.quaternion_from_euler(0.0, 0.0, rz)

            b = BoundingBox()
            b.pose.position = Vector3(tx, ty, tz / 2.0)
            b.pose.orientation = Quaternion(*q)
            b.dimensions = size
            b.label = i

            picto_text = Pictogram()
            picto_text.mode = Pictogram.STRING_MODE
            picto_text.pose.position = Vector3(tx, ty, -tz / 2.0)
            # fixed orientation for the text pictogram
            picto_text.pose.orientation = Quaternion(0.0, -0.5, 0.0, 0.5)
            picto_text.size = 5
            picto_text.color = std_msgs.msg.ColorRGBA(1, 1, 1, 1)
            picto_text.character = label

            # Bounding Box corners
            corner_x = np.array(
                [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2])
            corner_y = np.array(
                [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2])
            corner_z = np.array([0, 0, 0, 0, h, h, h, h])
            rz = wrapToPi(rz)

            ###################
            # Create the box at the origin, then rotate and translate it into the
            # velodyne frame according to the pose; finally, project into the 2D image
            R = np.array([[math.cos(rz), -math.sin(rz), 0],
                          [math.sin(rz), math.cos(rz), 0], [0, 0, 1]])
            corner_3d = np.dot(R, np.array([corner_x, corner_y, corner_z]))
            #Translate
            corner_3d[0, :] = corner_3d[0, :] + tx
            corner_3d[1, :] = corner_3d[1, :] + ty
            corner_3d[2, :] = corner_3d[2, :] + tz

            #Project to 2D
            low_row = np.vstack(
                [corner_3d,
                 np.ones(corner_3d.shape[1], dtype=float)])  # np.float was removed in NumPy 1.24
            corner_3d = np.dot(np.asarray(rt_matrix), low_row)

            #################################
            #Create an orientation vector
            orientation_3d = np.dot(R, np.array([[0, 0.7 * l], [0, 0], [0, 0]]))
            #Translate
            orientation_3d[0, :] = orientation_3d[0, :] + tx
            orientation_3d[1, :] = orientation_3d[1, :] + ty
            orientation_3d[2, :] = orientation_3d[2, :] + tz
            #Project
            low_row = np.vstack([
                orientation_3d,
                np.ones(orientation_3d.shape[1], dtype=float)
            ])
            orientation_3d = np.dot(rt_matrix, low_row)

            K = np.asarray(cam_to_cam['P_rect_02']).reshape(3, 4)
            K = K[:3, :3]

            corners_2d = projectToImage(corner_3d, K)
            orientation_2d = projectToImage(orientation_3d, K)

            x1 = min(corners_2d[0, :])
            x2 = max(corners_2d[0, :])
            y1 = min(corners_2d[1, :])
            y2 = max(corners_2d[1, :])

            bbox_2d = ImageRect()
            bbox_2d.score = -10.0

            if (label in ('Car', 'Truck', 'Van')
                    and np.any(corner_3d[2, :] >= 0.5)
                    and np.any(orientation_3d[2, :] >= 0.5)
                    and x1 >= 0 and x2 >= 0 and y1 > 0 and y2 >= 0
                    and occlusion < 2):
                bbox_2d.x = x1
                bbox_2d.y = y1
                bbox_2d.width = x2 - x1
                bbox_2d.height = y2 - y1
                bbox_2d.score = 1.0

            if (frame + j) in d:  # dict.has_key() was removed in Python 3
                d[frame + j].append(b)
                boxes_2d[frame + j].append(bbox_2d)
                pictograms[frame + j].append(picto_text)
            else:
                d[frame + j] = [b]
                boxes_2d[frame + j] = [bbox_2d]
                pictograms[frame + j] = [picto_text]

    return d, boxes_2d, pictograms
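`readXML` relies on helpers that sit outside this excerpt (`projectToImage`, `wrapToPi`, plus the globals `rt_matrix` and `cam_to_cam`). A plausible sketch of the two helpers, assuming a standard pinhole projection with a homogeneous divide:

import math
import numpy as np

def projectToImage(pts_3d, K):
    # pinhole projection: apply the 3x3 intrinsics, then divide by depth
    pts_2d = np.dot(K, pts_3d[:3, :])
    pts_2d[:2, :] /= pts_2d[2, :]
    return pts_2d[:2, :]

def wrapToPi(angle):
    # wrap an angle to the interval [-pi, pi)
    return (angle + math.pi) % (2.0 * math.pi) - math.pi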
Ejemplo n.º 24
0
    def update(self, timestamp, pose, angular_velocity, linear_acceleration):
        # Prepare state vector (pose only; ignore angular_velocity, linear_acceleration)
        position = np.array(
            [pose.position.x, pose.position.y, pose.position.z])
        orientation = np.array([
            pose.orientation.x, pose.orientation.y, pose.orientation.z,
            pose.orientation.w
        ])

        if self.last_timestamp is None:
            velocity = np.array([0.0, 0.0, 0.0])
        else:
            velocity = (position - self.last_position) / max(
                abs(timestamp - self.last_timestamp), 1e-05)

        # assemble the state vector (velocity appended)
        state = np.concatenate([position, orientation, velocity])

        self.last_timestamp = timestamp  # remember for the next update
        self.last_position = position
        done = False  # initialize episode-termination flag

        ###############################
        # CALCULATE INDIVIDUAL ERRORS #
        ###############################

        x = np.linalg.norm(np.array([0.0, 0.0, 10.0]) -
                           state[0:3])  # distance from copter to target (0, 0, 10)
        o = np.linalg.norm(np.array([0.0, 0.0, 0.0, 0.0]) -
                           state[3:7])  # orientation error (unused: space is constrained)
        v = np.linalg.norm(np.array([0.0, 0.0, 0.0]) -
                           state[7:10])  # velocity magnitude

        v_error = ((x * 3)**2 + v**2)  # combined position/velocity error
        x_error = abs(x**2)  # squared distance from target (currently unused)
        # v_error = abs(v)/(abs(x) + 0.1)                        # prevent divide by 0

        reward = -v_error  # x and v errors

        if abs(state[2] - 10.0) < 1e-2:  # quadcopter reaches target height (tolerance avoids exact float comparison)
            reward += 50  # bonus reward

        if timestamp > self.max_duration:  # if time limit is exceeded
            done = True  # end current episode

        if x > self.max_distance:  # if max distance is exceeded
            done = True  # end current episode
            reward -= 100  # give penalty

        reward = 0.01 * reward  # scale down to avoid exploding gradients

        action = self.agent.step(state, reward, done)

        ###################################################################
        # Convert to proper force command (a Wrench object) and return it #
        ###################################################################
        if action is not None:
            # print ("Action is not None!")
            action = np.clip(action.flatten(), self.action_space.low,
                             self.action_space.high
                             )  # flatten, clamp to action space limits
            return Wrench(force=Vector3(action[0], action[1], action[2]),
                          torque=Vector3(action[3], action[4],
                                         action[5])), done
        else:
            print("Action is None?")
            return Wrench(), done
 def apply_control(self, rot_speed):
     drive_msg = Twist()
     drive_msg.linear = Vector3(0, 0, 0)
     drive_msg.angular = Vector3(0, 0, rot_speed)  # "+" <--> CCW | "-" <--> CW
     self.drive_pub.publish(drive_msg)
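Hypothetical usage, assuming `self.drive_pub` was created as a `rospy.Publisher` on a `Twist` topic such as `/cmd_vel`:

node.apply_control(0.5)   # spin in place, counter-clockwise at 0.5 rad/s
node.apply_control(-0.5)  # same speed, clockwise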
Ejemplo n.º 26
0
def add(v1, v2):
    return Vector3(v1.x + v2.x, v1.y + v2.y, v1.z + v2.z)
Ejemplo n.º 27
0
    def update(self, timestamp, pose, angular_velocity, linear_acceleration):
        # Prepare state vector (pose only; ignore angular_velocity, linear_acceleration)
        position = np.array(
            [pose.position.x, pose.position.y, pose.position.z])
        orientation = np.array([
            pose.orientation.x, pose.orientation.y, pose.orientation.z,
            pose.orientation.w
        ])

        if self.last_timestamp is None:
            velocity = np.array([0.0, 0.0, 0.0])
        else:
            velocity = (position - self.last_position) / max(
                timestamp - self.last_timestamp,
                1e-03)  # prevent divide by zero
        state = np.concatenate([position, orientation, velocity])
        self.last_timestamp = timestamp
        self.last_position = position
        # state = np.array([
        #         pose.position.x, pose.position.y, pose.position.z,
        #         pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w])

        # Compute reward / penalty and check if this episode is complete
        done = False
        error_position = np.linalg.norm(
            self.target_position -
            state[0:3])  # Euclidean distance from target position vector
        error_orientation = np.linalg.norm(
            self.target_orientation - state[3:7]
        )  # Euclidean distance from target orientation quaternion
        error_velocity = np.linalg.norm(
            self.target_velocity -
            state[7:10])  # Euclidean distance from target velocity vector
        # import math
        # error_position = math.pow(error_position, 3)
        # error_orientation = math.pow(error_orientation, 3)
        # error_velocity = math.pow(error_velocity, 3)

        reward = -(self.weight_position * error_position +
                   self.weight_orientation * error_orientation +
                   self.weight_velocity * error_velocity)

        if error_position > self.max_error_position:
            reward -= 50.0  # extra penalty, agent strayed too far
            print('update(): done - position error too large at {}'.format(state[0:3]))
            done = True
        elif timestamp > self.max_duration:
            print('update(): done - reached max duration, extra reward')
            reward += 50.0  # extra reward, agent made it to the end
            done = True

        # Take one RL step, passing in current state and reward, and obtain action
        # Note: The reward passed in here is the result of past action(s)
        action = self.agent.step(state, reward,
                                 done)  # note: action = <force; torque> vector

        # Convert to proper force command (a Wrench object) and return it
        if action is not None:
            action = np.clip(action.flatten(), self.action_space.low,
                             self.action_space.high
                             )  # flatten, clamp to action space limits
            return Wrench(force=Vector3(action[0], action[1], action[2]),
                          torque=Vector3(action[3], action[4],
                                         action[5])), done
        else:
            return Wrench(), done
Ejemplo n.º 28
0
def subtract(v1, v2):
    return Vector3(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z)
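Together with `add` from example n.º 26, this gives component-wise arithmetic over `geometry_msgs/Vector3`; a quick sanity check:

from geometry_msgs.msg import Vector3

a = Vector3(1.0, 2.0, 3.0)
b = Vector3(0.5, 0.5, 0.5)
print(add(a, b))       # x: 1.5, y: 2.5, z: 3.5
print(subtract(a, b))  # x: 0.5, y: 1.5, z: 2.5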
Ejemplo n.º 29
0
        degree = degree - int(data.angular.z)

        dt = (current_time - last_time).to_sec()
        delta_x = (data.linear.x * np.cos(th) -
                   data.linear.y * np.sin(th)) * dt
        delta_y = (data.linear.x * np.sin(th) +
                   data.linear.y * np.cos(th)) * dt
        delta_th = data.angular.z * dt

        x += delta_x
        y += delta_y
        th += delta_th

        q = quaternion_from_euler(0, 0, th)

        odom.twist.twist = Twist(Vector3(data.linear.x, data.linear.y, 0),
                                 Vector3(0, 0, data.angular.z))

        if degree < 1:
            degree = 360
    except Exception:
        pass  # skip this update if the odometry math fails (e.g. before the first message)

    base_laser_link_broadcaster.sendTransform((0, 0, 0.2),
                                              quaternion_from_euler(0, 0, 0),
                                              current_time - start_time,
                                              "base_laser_link", "base_link")

    base_link_broadcaster.sendTransform((0, 0, 0),
                                        quaternion_from_euler(0, 0, 0),
                                        current_time - start_time, "base_link",
Ejemplo n.º 30
0
	def send_odom(self):

		time_encA_last=self.time_encA
		time_encB_last=self.time_encB

		steps_encA_last=self.steps_encA
		steps_encB_last=self.steps_encB

		wd=0.0
		wi=0.0

		v=0.0
		omega=0.0

		vx=0.0
		vy=0.0
		vth=0.0

		x=0.0
		y=0.0
		th=0.0


		r = rospy.Rate(40.0)
		while not rospy.is_shutdown():


			current_time=rospy.Time.now()

			dtA=(self.time_encA-time_encA_last)*(10**-4)  # encoder time in 100 us ticks
			dtB=(self.time_encB-time_encB_last)*(10**-4)  # encoder time in 100 us ticks

			if dtA != 0 and dtB != 0:

				dstepsA=self.steps_encA-steps_encA_last
				dstepsB=self.steps_encB-steps_encB_last

				# Save the current values for the next iteration
				time_encA_last=self.time_encA
				time_encB_last=self.time_encB

				steps_encA_last=self.steps_encA
				steps_encB_last=self.steps_encB


				# Wheel angular velocities (rad/s); float() guards against
				# Python 2 integer division
				wd=(float(dstepsA)/self.pasos_vuelta)*2*pi/dtA
				wi=(float(dstepsB)/self.pasos_vuelta)*2*pi/dtB

				# Forward kinematics: wheel speeds -> body twist (v, omega)
				v=(wd+wi)*self.radio/2.0
				omega=(wd-wi)*self.radio/self.D

				# Dead reckoning ----> check whether both encoder timestamps coincide?
				vx = v*cos(th)
				vy = v*sin(th)
				vth = omega

				x += vx*dtA
				y += vy*dtA
				th += vth*dtA

			# since all odometry is 6DOF we'll need a quaternion created from yaw
			odom_quat = tf.transformations.quaternion_from_euler(0, 0, th)

			# first, we'll publish the transform over tf
			self.odom_broadcaster.sendTransform(
				(x, y, 0.),
				odom_quat,
				current_time,
				"base_link",
				"odom"
			)


			# next, we'll publish the odometry message over ROS
			odom = Odometry()
			odom.header.stamp = current_time
			odom.header.frame_id = "odom"

			# set the position
			odom.pose.pose = Pose(Point(x, y, 0.), Quaternion(*odom_quat))

			# set the velocity
			odom.child_frame_id = "base_link"
			odom.twist.twist = Twist(Vector3(vx, vy, 0), Vector3(0, 0, vth))

			# publish the message
			self.odom_pub.publish(odom)


			# Data logging

			#rospy.loginfo("tA " + str(time_encA_last) + " tB " + str(time_encB_last)
			#	+ " stepsA " + str(steps_encA_last) + " stepsB " + str(steps_encB_last)
			#	+ " dt " +str(dtA) + " dstep " +str(dstepsA)
			#	+ " wi "+ str(wi) + " wd " + str(wd)
			#	+ " v " +str(v) + " omega " + str(omega)
			#	+ " x " + str(x) + " y " + str(y) + " th " + str(th))






			r.sleep()
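Stripped of ROS, the kinematics inside `send_odom` are the textbook differential-drive equations and can be sanity-checked in isolation; a sketch, with `radio` as the wheel radius in metres and `D` as the wheel separation:

from math import cos, sin

def diff_drive_twist(wd, wi, radio, D):
    # forward kinematics: wheel angular speeds (rad/s) -> body twist
    v = (wd + wi) * radio / 2.0      # linear velocity, m/s
    omega = (wd - wi) * radio / D    # angular velocity, rad/s
    return v, omega

def dead_reckon_step(x, y, th, v, omega, dt):
    # one Euler integration step, mirroring the loop above
    return x + v * cos(th) * dt, y + v * sin(th) * dt, th + omega * dt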