Example #1
def talker():
    pub = rospy.Publisher('path', Float32MultiArray, queue_size=10)
    rospy.init_node('path_planner', anonymous=True)
    rate = rospy.Rate(1)  # 1hz

    waypoints = Float32MultiArray()
    waypoints.layout.dim.append(MultiArrayDimension())
    waypoints.layout.dim.append(MultiArrayDimension())

    waypoints.layout.dim[0].label = "Length"
    waypoints.layout.dim[0].size = 1   # number of waypoints; must be set before it is read below
    length = waypoints.layout.dim[0].size * 3  # 3 joint values per waypoint
    waypoints.layout.dim[0].stride = length
    waypoints.layout.dim[1].label = "Joints"
    waypoints.layout.dim[1].size = 3
    waypoints.layout.dim[1].stride = 3
    waypoints.layout.data_offset = 0

    while not rospy.is_shutdown():
        waypoints.data = [0] * (length)
        #for i in range(0, length):
        #    waypoints.data[i] = i + 1
        #waypoints.data = [-1.44, 1.4, 0.6, -1.57, 0, -1.57, -1.0, 0.7, 0, -0.5, 0.7, -1.0, -0.3, 0.5, -0.5, 0, 0, 0, -0.3, 0.5, -0.5, 0, 0, 0]
        waypoints.data = [-1.57, 0, 0]
        waypoints.layout.dim[0].size = len(waypoints.data) // 3  # number of waypoints
        rospy.loginfo(waypoints)
        pub.publish(waypoints)
        rate.sleep()
        print("\nTask completed\n")

        rospy.signal_shutdown("Task completed")
Example #2
def main():
    global pub
    
    rospy.init_node('go_to_point')
    
    pub = rospy.Publisher('/diff_drive_controller/cmd_vel', Twist, queue_size=1)
    status_pub = rospy.Publisher('/move_bot/status', Bool, queue_size=1)
    sub_odom = rospy.Subscriber('/odom', Odometry, clbk_odom)
    target_sub = rospy.Subscriber('/move_bot/goal', Pose, clbk_goal)
    mat = Float64MultiArray()
    mat.layout.dim.append(MultiArrayDimension())
    mat.layout.dim.append(MultiArrayDimension())
    gripper_pub = rospy.Publisher('/gripper_controller/command', Float64MultiArray, queue_size=1)
    rate = rospy.Rate(200)
    extend = 0
    gripper_extend = 0
    global message_arrived
    while not rospy.is_shutdown():
        if message_arrived:
            if state_ == 0:
                fix_yaw(desired_position_)
            elif state_ == 1:
                go_straight_ahead(desired_position_)
            elif state_ == 2:
                orient_yaw(yaw_required)
            elif state_ == 3:
                done()
                change_state(0)
                message_arrived = False
                status = True
                status_msg = Bool()
                status_msg.data = status
                status_pub.publish(status_msg)
                print("done")
Example #3
    def __init__(self):

        # Define the dimensions of the message
        rightHandGoalDim = MultiArrayDimension()
        rightHandGoalDim.size = NUM_DOFS
        rightHandGoalDim.label = "rightHandGoal"
        rightHandGoalDim.stride = 1

        # Define the goal messages
        self.rightHandGoalMsg = Float64MultiArray()
        for ii in range(0, NUM_DOFS):
            self.rightHandGoalMsg.data.append(0)
        self.rightHandGoalMsg.layout.dim.append(rightHandGoalDim)
        self.rightHandGoalMsg.layout.data_offset = 0

        leftHandGoalDim = MultiArrayDimension()
        leftHandGoalDim.size = NUM_DOFS
        leftHandGoalDim.label = "leftHandGoal"
        leftHandGoalDim.stride = 1

        self.leftHandGoalMsg = Float64MultiArray()
        for ii in range(0, NUM_DOFS):
            self.leftHandGoalMsg.data.append(0)
        self.leftHandGoalMsg.layout.dim.append(leftHandGoalDim)
        self.leftHandGoalMsg.layout.data_offset = 0
Example #4
    def callback(self, joint_state):
        if len(self.joint_position) == 0:
            self.joint_position = np.array(joint_state.position)
        else:
            if np.all(self.joint_position == joint_state.position):
                return
            else:
                self.joint_position = copy.deepcopy(joint_state.position)
        print(self.joint_position)
        mat = UInt16MultiArray()
        mat.layout.dim.append(MultiArrayDimension())
        mat.layout.dim.append(MultiArrayDimension())
        mat.layout.dim[0].label = "servo_number"
        mat.layout.dim[0].size = 6
        mat.layout.dim[0].stride = 3 * 6
        mat.layout.dim[1].label = "single_command"
        mat.layout.dim[1].size = 3
        mat.layout.dim[1].stride = 3
        mat.layout.data_offset = 0
        mat.data = [
            1, 1500, 1000, 2, 1500, 1000, 3, 1500, 1000, 4, 1500, 1000, 5,
            1500, 1000, 6, 1500, 1000
        ]

        dstride1 = mat.layout.dim[1].stride
        offset = mat.layout.data_offset
        for i in range(6):
            if i == 1 or i == 2:
                PMW_value = -self.joint_position[i] / 1.5708 * 1000 + 1500
            else:
                PMW_value = self.joint_position[i] / 1.5708 * 1000 + 1500
            mat.data[offset + 1 + i * dstride1] = int(PMW_value)  # UInt16MultiArray data must be integers
        print(mat)
        self.PMW_value_pub.publish(mat)
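
The callback above packs six (servo id, PWM value, duration) triplets into one flat array and only overwrites the PWM field of each triplet. As a rough illustration of the same addressing convention on the reading side, here is a minimal sketch; the helper name read_servo_command is illustrative and not part of the original code:

def read_servo_command(mat, i):
    # Read back the i-th (servo id, PWM value, duration) triplet using the layout built above.
    stride = mat.layout.dim[1].stride  # 3 values per servo entry
    offset = mat.layout.data_offset
    servo_id = mat.data[offset + i * stride]
    pwm = mat.data[offset + i * stride + 1]
    duration = mat.data[offset + i * stride + 2]
    return servo_id, pwm, duration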
Example #5
def image_to_saliency(t, saliency, saliency_pub, saliency_image_pub, bridge,
                      image, last_time, elapsed):
    if t < 1.0:
        return

    if image.value is None:
        return

    if last_time.value is None:
        last_time.value = t
    current_time = t
    dt = current_time - last_time.value
    last_time.value = current_time

    elapsed.value = elapsed.value + dt
    if elapsed.value < 0.01:
        return
    else:
        elapsed.value = 0.

    image = bridge.value.imgmsg_to_cv2(image.value, "bgr8")
    saliency_map = saliency.value.compute_saliency_map(image)
    saliency_map_image = bridge.value.cv2_to_imgmsg(
        np.uint8(saliency_map * 255.), "mono8")
    saliency_image_pub.value.publish(saliency_map_image)

    from std_msgs.msg import Float32MultiArray, MultiArrayDimension, MultiArrayLayout
    height = MultiArrayDimension(size=len(saliency_map))
    width = MultiArrayDimension(size=len(saliency_map[0]))
    lo = MultiArrayLayout([height, width], 0)
    saliency_pub.value.publish(
        Float32MultiArray(layout=lo, data=saliency_map.flatten()))
def image_to_saliency(t, image, bridge, saliency, saliency_pub,
                      saliency_image_pub, points, camera_model,
                      camera_info_left, last_time, elapsed, pan, tilt,
                      saliency_map):
    if image.value is None or camera_info_left.value is None:
        return

    if last_time.value is None:
        last_time.value = t
    current_time = t
    dt = current_time - last_time.value
    last_time.value = current_time

    elapsed.value = elapsed.value + dt
    if elapsed.value < 0.01:
        return
    else:
        elapsed.value = 0.

    if saliency_map.value is None:
        image = bridge.value.imgmsg_to_cv2(image.value, "bgr8")
        image = np.zeros(image.shape)
        saliency_map.value = saliency.value.compute_saliency_map(image)

    saliency_map_current = saliency_map.value.copy()

    # apply curiosity
    camera_model.value.fromCameraInfo(camera_info_left.value)
    for point in points.value:
        # call service
        pixel = camera_model.value.project3dToPixel(
            (point.point.x - pan.value, point.point.y - tilt.value,
             point.point.z))
        x = int(pixel[0] * (len(saliency_map_current[0]) /
                            float(camera_info_left.value.width)))
        x = x + 6  # correction, bug in opencv?
        y = int(
            pixel[1] *
            (len(saliency_map_current) / float(camera_info_left.value.height)))
        if x >= 0 and x < len(saliency_map_current[0]) and y >= 0 and y < len(
                saliency_map_current):
            from skimage.draw import circle
            rr, cc = circle(
                y, x, 25,
                (len(saliency_map_current), len(saliency_map_current[0])))
            saliency_map_current[rr, cc] = saliency_map_current[rr, cc] * min(
                1, (t - point.header.stamp.to_sec()))

    saliency_map_image = bridge.value.cv2_to_imgmsg(
        np.uint8(saliency_map_current * 255.), "mono8")
    saliency_image_pub.value.publish(saliency_map_image)

    from std_msgs.msg import Float32MultiArray, MultiArrayDimension, MultiArrayLayout
    height = MultiArrayDimension(size=len(saliency_map_current))
    width = MultiArrayDimension(size=len(saliency_map_current[0]))
    lo = MultiArrayLayout([height, width], 0)
    saliency_pub.value.publish(
        Float32MultiArray(layout=lo, data=saliency_map_current.flatten()))

    return
Example #7
def think(msgs):
    global curx, cury, positions, r
    print(msgs.data)
    data = [int(x) for x in msgs.data]
    idxs = sorted(range(9), key=lambda k:data[k])
    mat = Float32MultiArray()
    mat.layout.dim.append(MultiArrayDimension())
    mat.layout.dim.append(MultiArrayDimension())
    mat.layout.dim[0].label = "height"
    mat.layout.dim[1].label = "width"
    mat.layout.dim[0].size = 2
    mat.layout.dim[1].size = 9
    mat.data = [0]*18
    print(idxs)
    for i in range(len(idxs)):
        pos = positions[idxs[i]]
        print(pos)
        tx = pos[0] - curx
        ty = pos[1] - cury
        curx = pos[0]
        cury = pos[1]
        mat.data[i * 2] = tx
        mat.data[i * 2 + 1] = ty
    print(mat)
    pub.publish(mat)
    r.sleep()
def demo():
    '''
    Publish sample data to ROS
    '''

    # Read demo binvox as (64*64*64) array
    with open('demo/occupy.binvox', 'rb') as f:
        occ = binvox_rw.read_as_3d_array(f).data
    with open('demo/non_occupy.binvox', 'rb') as f:
        non = binvox_rw.read_as_3d_array(f).data

    msg = ByteMultiArray()
    msg.data = (occ.astype(int) - non.astype(int)).flatten().tolist()
    msg.layout.dim.append(
        MultiArrayDimension(label='x', size=DIM, stride=DIM * DIM * DIM))
    msg.layout.dim.append(
        MultiArrayDimension(label='y', size=DIM, stride=DIM * DIM))
    msg.layout.dim.append(MultiArrayDimension(label='z', size=DIM, stride=DIM))

    rospy.init_node('shape_demo_loader')
    pub = rospy.Publisher('local_occupancy',
                          numpy_msg(ByteMultiArray),
                          queue_size=10)
    rospy.Subscriber("local_occupancy_predicted", numpy_msg(ByteMultiArray),
                     callback)

    time.sleep(5)
    pub.publish(msg)
    rospy.spin()
Example #9
    def im_callback(self, data):
        outputs, im = self.objsegmenter.segment(data, y_crop_idx=650)
        labels = self.objsegmenter.get_labels(outputs)
        box_list = np.asarray(
            self.objsegmenter.get_boxes(outputs).tensor.tolist())
        if self.visualize:
            out = self.objsegmenter.draw_output(outputs, im)
            # Publish segmented image
            self.image_pub.publish(cv2_to_imgmsg(out.get_image(), 'bgr8'))

        # Publish segmentation data as Segmentation message
        seg_msg = Segmentation()
        # Add labels
        seg_msg.classes = labels
        # Flatten data into 1d array
        seg_msg.boxes.data = list(box_list.flatten())
        # Populate layout info
        dims = box_list.shape
        if len(dims) > 1:
            seg_msg.boxes.layout.dim = [
                MultiArrayDimension(),
                MultiArrayDimension()
            ]
            seg_msg.boxes.layout.dim[0].label = 'classes'
            seg_msg.boxes.layout.dim[0].size = dims[0]
            seg_msg.boxes.layout.dim[0].stride = dims[0] * dims[1]
            seg_msg.boxes.layout.dim[1].label = 'bbox coordinates'
            seg_msg.boxes.layout.dim[1].size = dims[1]
            seg_msg.boxes.layout.dim[1].stride = dims[1]
            print(seg_msg.classes)
            print(seg_msg.boxes.data)
        else:
            print('No objects found')
        self.segment_pub.publish(seg_msg)
Example #10
def vox_to_float_array(voxel_grid, dim):
    out_msg = Float32MultiArray()
    out_msg.data = voxel_grid.astype(np.float32).flatten().tolist()
    out_msg.layout.dim.append(MultiArrayDimension(label='x', size=dim, stride=dim*dim*dim))
    out_msg.layout.dim.append(MultiArrayDimension(label='y', size=dim, stride=dim*dim))
    out_msg.layout.dim.append(MultiArrayDimension(label='z', size=dim, stride=dim))
    return out_msg
Example #11
    def determine_vision_pattern(self):
        resolution = self.occ_grid.info.resolution
        sensor_range = self.sensor.range_max

        dimension = 2 * int(
            np.round(math.ceil((sensor_range) / resolution - 0.5))) + 1
        self.vision_pattern.data = []

        self.vision_pattern.layout.dim.append(MultiArrayDimension())
        self.vision_pattern.layout.dim.append(MultiArrayDimension())
        self.vision_pattern.layout.dim[0].size = dimension
        self.vision_pattern.layout.dim[1].size = dimension
        self.vision_pattern.layout.dim[0].label = "rows"
        self.vision_pattern.layout.dim[1].label = "cols"
        self.vision_pattern.layout.data_offset = 0
        centre_index = (dimension + 1) // 2 - 1  # -1 since index starts at 0

        for i in range(0, dimension):
            for j in range(0, dimension):
                distance = resolution * np.sqrt((i - centre_index)**2 +
                                                (j - centre_index)**2)
                if distance <= sensor_range:
                    self.vision_pattern.data.append(1)
                else:
                    self.vision_pattern.data.append(0)
Example #12
def handle_p2b(req):
    global theta
    print("here")
    theta = list(req.configs.angles)
    delta = 0.0001
    f_original = computeH(0, 0, 0, 0)
    f_theta1 = computeH(delta, 0, 0, 0)
    f_theta2 = computeH(0, delta, 0, 0)
    f_theta3 = computeH(0, 0, delta, 0)
    f_theta4 = computeH(0, 0, 0, delta)
    J1 = (f_theta1 - f_original) / delta
    J2 = (f_theta2 - f_original) / delta
    J3 = (f_theta3 - f_original) / delta
    J4 = (f_theta4 - f_original) / delta

    jacobian = Float64MultiArray()
    jacobian.layout.dim = [
        MultiArrayDimension('height', 4, 3 * 4),
        MultiArrayDimension('width', 3, 3)
    ]
    jacobian.data = [
        J1[0], J2[0], J3[0], J4[0], J1[1], J2[1], J3[1], J4[1], J1[2], J2[2],
        J3[2], J4[2]
    ]
    return jacobian
Example #13
def callback(data):
    #rospy.loginfo(data)
    rownum = data.layout.dim[0].size
    columnnum = data.layout.dim[1].size
    dataset = data.data
    subarrayset = []
    #count=0
    for i in range(0, rownum):
        temp = []
        for k in range(0, columnnum):
            # row-major layout: element (i, k) is at index columnnum * i + k
            temp.append(dataset[columnnum * i + k])
        (x, y, z) = findsubarray(temp)
        #rospy.loginfo((x,y,z))
        for j in range(x, y):
            subarrayset.append(dataset[columnnum * i + j])
            #count=count+y-x+1
            #rospy.loginfo(count)
    #rospy.loginfo(subarrayset)
    (begin,end,summary)=findsubarray(subarrayset)
    rospy.loginfo((begin,end,summary))
    outputdata = [begin, end]
    pub = rospy.Publisher('/hw1/subarray', UInt32MultiArray, queue_size=10)
    layout = MultiArrayLayout()
    layout.dim = [MultiArrayDimension('height', 1, 2 * 1),
                MultiArrayDimension('width', 2, 2)]
    pub.publish(layout, outputdata)
def omni_talker():
    pub = rospy.Publisher('Omni_data', Float32MultiArray, queue_size=10)
    rospy.init_node('Omni_talker', anonymous=False)
    rate = rospy.Rate(19)  # 19 Hz

    while not rospy.is_shutdown():

        # random data for debug and test
        _Omni_data =  Float32MultiArray()
        _Omni_data.data = np.random.random((3,3)).astype(np.float32).reshape([9])
        _Omni_data.layout.data_offset = 0
        # create two dimensions in the dim array
        _Omni_data.layout.dim = [MultiArrayDimension(), MultiArrayDimension()]
        '''
        The stride in these dimensions refers to how many samples along the next
        higher dimension is. So in the case of the samples, there is a stride of
        4096 to the next sample. In the case of dimension zero there is no higher
        dimension so the stride is the overall size of the message data.
        '''
        # dim[0] is the vertical dimension of your matrix
        _Omni_data.layout.dim[0].label = "vertical"
        _Omni_data.layout.dim[0].size = 3
        _Omni_data.layout.dim[0].stride = 9  # overall number of elements (3 x 3), per the note above
        # dim[1] is the horizontal dimension of your matrix
        _Omni_data.layout.dim[1].label = "horizontal"
        _Omni_data.layout.dim[1].size = 3
        _Omni_data.layout.dim[1].stride = 3

        rospy.loginfo("python: Omni_data sent")
        # rospy.loginfo(_Omni_data) # for test only
        pub.publish(_Omni_data)
        rate.sleep()
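
The comment block in omni_talker explains the stride convention, and for this 3x3 matrix the subscriber can rebuild the matrix from the dim sizes alone. A minimal subscriber-side sketch, assuming numpy is available; the callback name omni_callback is illustrative:

import numpy as np

def omni_callback(msg):
    # Recover the 3x3 matrix from the flattened data using the advertised dimensions.
    rows = msg.layout.dim[0].size
    cols = msg.layout.dim[1].size
    offset = msg.layout.data_offset
    return np.array(msg.data[offset:offset + rows * cols]).reshape(rows, cols)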
Example #15
    def test_strify_message(self):
        from genpy.message import Message, strify_message, fill_message_args
        def roundtrip(m):
            yaml_text = strify_message(m)
            print(yaml_text)
            loaded = yaml.load(yaml_text) 
            print("loaded", loaded)
            new_inst = m.__class__()
            if loaded is not None:
                fill_message_args(new_inst, [loaded])
            else:
                fill_message_args(new_inst, [])                
            return new_inst

        # The following tests have not been ported to genpy yet

        # test array of Messages field. We can't use M4 or M5 because fill_message_args has to instantiate the embedded type
        from test_roslib_comm.msg import ArrayOfMsgs
        from std_msgs.msg import String, Time, MultiArrayLayout, MultiArrayDimension
        dims1 = [MultiArrayDimension(*args) for args in [('', 0, 0), ('x', 1, 2), ('y of z', 3, 4)]]
        dims2 = [MultiArrayDimension('hello world', 91280, 1983274)]
        times = [Time(genpy.Time(*args)) for args in [(0,), (12345, 6789), (1, 1)]]
        val = ArrayOfMsgs([String(''), String('foo'), String('bar of soap')],
                          times,
                          [MultiArrayLayout(dims1, 0), MultiArrayLayout(dims2, 12354)],
                          )
        self.assertEquals(val, roundtrip(val))
 def publish_bounding_boxes(self, bbs, image_width, image_height):
     # Given a list of bounding boxes, publish same.
     # The detector returns an array of dlib.rectangle's.
     if len(bbs) > 0:
         msg = Int32MultiArray()
         msg.layout.data_offset = 0
         msg.layout.dim.append(
             MultiArrayDimension(label='height',
                                 size=len(bbs) + 1,
                                 stride=4 * (len(bbs) + 1)))
         msg.layout.dim.append(
             MultiArrayDimension(label='width', size=4, stride=1))
         msg.layout.dim.append(
             MultiArrayDimension(label='channel', size=1, stride=1))
         data = []
         # The first entry is the size of the source image
         data.append(0)
         data.append(0)
         data.append(image_width)
         data.append(image_height)
         for rect in bbs:
             data.append(rect.left())
             data.append(rect.top())
             data.append(rect.right())
             data.append(rect.bottom())
         msg.data = data
         self.bounding_box_publisher.publish(msg)
     return
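
As the comments in publish_bounding_boxes note, the first four values carry the source image size and every following group of four is one rectangle. A minimal decoding sketch under those assumptions; unpack_bounding_boxes is an illustrative name, not part of the original code:

def unpack_bounding_boxes(msg):
    # First 4-element row is (0, 0, image_width, image_height);
    # each following row is one rectangle as (left, top, right, bottom).
    row_len = msg.layout.dim[1].size  # 4
    rows = [msg.data[i:i + row_len] for i in range(0, len(msg.data), row_len)]
    image_width, image_height = rows[0][2], rows[0][3]
    boxes = rows[1:]
    return image_width, image_height, boxes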
Example #17
    def publish(self, rgb, temp_array):
        # Publish RGB image
        rgb_img_msg = self.bridge.cv2_to_imgmsg(rgb)
        rgb_img_msg.header.frame_id = self.evo_thermal_frame
        rgb_img_msg.header.stamp = rospy.Time.now()
        self.rgb_publisher.publish(rgb_img_msg)

        # Publish temperatures array
        raw_temp_array = Float64MultiArray()
        raw_temp_array.layout.dim.append(MultiArrayDimension())
        raw_temp_array.layout.dim[0].size = 32
        raw_temp_array.layout.dim[0].stride = 1
        raw_temp_array.layout.dim[0].label = "x-axis"
        raw_temp_array.layout.dim.append(MultiArrayDimension())
        raw_temp_array.layout.dim[1].size = 32
        raw_temp_array.layout.dim[1].stride = 1
        raw_temp_array.layout.dim[1].label = "y-axis"
        data = np.reshape(temp_array, (1024))
        data = np.round(data, 2)  # np.round is not in-place; keep the rounded result
        raw_temp_array.data = data
        self.raw_publisher.publish(raw_temp_array)

        # Publish internal sensor temperature
        self.ptat_publisher.publish(self.ptat / self.scaling -
                                    self.celsius_offset)
Example #18
    def __init__(self):
        #Initialize ArbotiX communications
        print("\nArbotix initialization, wait 10 seconds...")
        ArbotiX.__init__(self)
        for x in xrange(1,21):
            time.sleep(0.5)
            print(str(x*0.5) + "/10s")



        #Set up velocity and position vectors and messages
        self.joints_poses = [0,0,0,0,0,0]
        self.joints_vels = [0,0,0,0,0]
        self.ee_closed = 0
        self.vels_to_pub = Float32MultiArray()
        self.poses_to_pub = Float32MultiArray()
        self.poses_layout = MultiArrayDimension('joints_poses', 6, 0)
        self.vels_layout = MultiArrayDimension('joints_vels', 5, 0)
        self.poses_to_pub.layout.dim = [self.poses_layout]
        self.poses_to_pub.layout.data_offset = 0
        self.vels_to_pub.layout.dim = [self.vels_layout]
        self.vels_to_pub.layout.data_offset = 0

        #ROS publishers for joint velocities and positions
        self.pos_pub = rospy.Publisher('/windowx_3links/joints_poses', Float32MultiArray, queue_size=1)
        self.vel_pub = rospy.Publisher('/windowx_3links/joints_vels', Float32MultiArray, queue_size=1)

        print"Windowx_3link node created, whaiting for messages in:"
        print"      windowx_2links/torque"
        print"Publishing joints' positions and velocities in:"
        print"      /windowx_3links/joints_poses"
        print"      /windowx_3links/joints_vels"
        #Start publisher
        self.publish()
Example #19
def to_multiarray(img_shape, boxes, scores, classes):
    # image_shape : the shape of the image used by yolo
    # boxes: An `array` of shape (num_boxes, 4) containing box corners as
    #     (y_min, x_min, y_max, x_max).
    # `scores`: A `list` of scores for each box.
    # classes: A `list` of indicies into `class_names`.
    # return: multiarray
    data = []
    for i in range(len(boxes)):
        y_min = boxes[i][0] / img_shape[0]
        x_min = boxes[i][1] / img_shape[1]
        y_max = boxes[i][2] / img_shape[0]
        x_max = boxes[i][3] / img_shape[1]
        confidence = scores[i]
        _class = classes[i]
        data.append(float(_class))
        data.append(float(y_min))
        data.append(float(x_min))
        data.append(float(y_max))
        data.append(float(x_max))
        data.append(float(confidence))
    message = Float32MultiArray()
    message.layout.dim.append(MultiArrayDimension("boxes", len(boxes), 1))
    message.layout.dim.append(MultiArrayDimension("box_data", 6, 1))
    message.data = data

    return message
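
The comments on to_multiarray describe the packing: six floats per detection, with the box coordinates normalized by the image shape. A minimal sketch of the inverse under that assumption; from_multiarray is an illustrative name:

def from_multiarray(message):
    # Each group of six floats is one detection:
    # class index, y_min, x_min, y_max, x_max, confidence (coordinates normalized as above).
    detections = []
    for i in range(0, len(message.data), 6):
        cls, y_min, x_min, y_max, x_max, confidence = message.data[i:i + 6]
        detections.append((int(cls), y_min, x_min, y_max, x_max, confidence))
    return detections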
def bno055_pub():
    rospy.init_node('bno055_pub', anonymous=True)
    pub = rospy.Publisher('bno055_state', Float32MultiArray, queue_size=1)
    rate = rospy.Rate(10)  # 10hz
    bno_controller = AdafruitBno055Controller()
    mat = Float32MultiArray()
    mat.layout.dim.append(MultiArrayDimension())
    mat.layout.dim.append(MultiArrayDimension())
    mat.layout.dim[0].label = "height"
    mat.layout.dim[1].label = "width"
    mat.layout.dim[0].size = 3
    mat.layout.dim[1].size = 1
    mat.layout.dim[0].stride = 3 * 1  # note: dim[0] stride is the total number of elements
    mat.layout.dim[1].stride = 1
    mat.layout.data_offset = 0
    mat.data = [0] * 3
    while not rospy.is_shutdown():
        x, y, z = bno_controller.gravity()
        gravity_str = "gravity is{}".format([x, y, z])
        rospy.loginfo(gravity_str)
        mat.data[0] = x
        mat.data[1] = y
        mat.data[2] = z
        pub.publish(mat)
        rate.sleep()
Example #21
    def __init__(self, name):
        self.name = name
        self.position_stddev = 0.50  # [m]
        self.ref_frame = "/map"
        rospy.init_node('kf_example')
        self.position_stddev = rospy.get_param("~position_stddev_m",
                                               self.position_stddev)
        rospy.loginfo("Starting KF Example")
        self.array = Float64MultiArray()
        self.array.layout = MultiArrayLayout()
        self.array.layout.data_offset = 0
        self.array.layout.dim = [MultiArrayDimension('data', 3, 3)]
        self.array.data = [0.0] * 3

        self.array2 = Float64MultiArray()
        self.array2.layout = MultiArrayLayout()
        self.array2.layout.data_offset = 0
        self.array2.layout.dim = [MultiArrayDimension('measure', 2, 2)]
        self.array2.data = [0.0] * 2

        # Initialisation of the matrices
        self.A = numpy.mat([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
        self.P = numpy.mat([[1, 0, 0], [0, 1, 0], [0, 0, 3.5]])
        self.Q = numpy.mat([[pow(0.5, 2), 0, 0], [0, pow(0.5, 2), 0],
                            [0, 0, 0.001]])
        self.H = numpy.mat([[1, 0, 0], [0, 1, 0]])
        self.R = numpy.mat([[0.1, 0], [0, 0.1]])
        self.U = numpy.vstack([0.0, 0.0])

        # State is x, y, theta
        self.state = numpy.vstack([0.0, 0.0, 0.0])
        self.last_update = rospy.Time.now()
    def default(self, ci='unused'):
        arrayList = []
        arrayList.append(self.data['w_fl'])
        arrayList.append(self.data['w_fr'])
        arrayList.append(self.data['w_rl'])
        arrayList.append(self.data['w_rr'])
        dim_fl = MultiArrayDimension()
        dim_fl.size = sys.getsizeof(self.data['w_fl'])
        dim_fl.label = 'FrontLeftWheelSpeed'
        dim_fl.stride = sys.getsizeof(self.data['w_fl'])

        dim_fr = MultiArrayDimension()
        dim_fr.size = sys.getsizeof(self.data['w_fr'])
        dim_fr.label = 'FrontRightWheelSpeed'
        dim_fr.stride = sys.getsizeof(self.data['w_fr'])

        dim_rl = MultiArrayDimension()
        dim_rl.size = sys.getsizeof(self.data['w_rl'])
        dim_rl.label = 'RearLeftWheelSpeed'
        dim_rl.stride = sys.getsizeof(self.data['w_rl'])

        dim_rr = MultiArrayDimension()
        dim_rr.size = sys.getsizeof(self.data['w_rr'])
        dim_rr.label = 'RearRightWheelSpeed'
        dim_rr.stride = sys.getsizeof(self.data['w_rr'])
 

        wheelspeed = Float64MultiArray()
        wheelspeed.data = arrayList
        wheelspeed.layout.dim.append(dim_fl)
        wheelspeed.layout.dim.append(dim_fr)
        wheelspeed.layout.dim.append(dim_rl)
        wheelspeed.layout.dim.append(dim_rr)
        wheelspeed.layout.data_offset = 0
        self.publish(wheelspeed)
Example #23
def reconstruction(data):
    arduino_data = data.data

    # reconstruction

    # dummy reconstruction, just for testing
    # delete this once the actual reconstruction is implemented
    arduino_data = np.array(arduino_data)
    arduino_data = arduino_data + 0.12

    #reconstruction end here

    # send message
    mat_size = m * n
    reconstruction_data = Float32MultiArray()
    reconstruction_data.layout.dim.append(MultiArrayDimension())
    reconstruction_data.layout.dim.append(MultiArrayDimension())
    reconstruction_data.layout.dim[0].label = "W"
    reconstruction_data.layout.dim[1].label = "H"
    reconstruction_data.layout.dim[0].size = m
    reconstruction_data.layout.dim[1].size = n
    reconstruction_data.layout.dim[0].stride = m * n
    reconstruction_data.layout.dim[1].stride = m
    reconstruction_data.layout.data_offset = 0
    reconstruction_data.data = arduino_data  #[0]* mat_size

    print("--------Start of Reconstruction data------")
    rospy.loginfo(reconstruction_data)
    print("---------End of Reconstruction data ------")
    pub.publish(reconstruction_data)
Example #24
File: publish.py Project: tloula/autonav
    def publish_depth(self, frames):
        depth_frame = frames.get_depth_frame()

        if self.DISPLAY:
            depth_image = np.asanyarray(
                self.colorizer.colorize(depth_frame).get_data())
            cv2.imshow("Original Depth", depth_image)
            cv2.waitKey(2)

        depth = np.asanyarray(depth_frame.get_data(), dtype=np.float64)
        depth = self.crop_depth(depth)

        # Create depth Float64MultiArray message to publish
        depth_data = Float64MultiArray()
        dim1 = MultiArrayDimension(label="height", size=depth.shape[0])
        dim2 = MultiArrayDimension(label="width", size=depth.shape[1])
        depth_data.layout.dim.extend([dim1, dim2])
        depth_data.data = depth.flatten()

        # Display Cropped Image
        if self.DANIEL_DEBUG and not self.DISPLAY:
            displayed_depth = np.asanyarray(
                self.colorizer.colorize(depth_frame).get_data())
            cv2.imshow("Published Depth", self.crop_depth(displayed_depth))
            cv2.waitKey(2)

        self.safe_publish(self.depth_pub, depth_data)
Example #25
def talker(mymode):
        
    if mymode == 'float64':
        pub = rospy.Publisher('codio', array64, queue_size=1)
        #    mydata=([[0, 0, 1, 250],[0, 1, 0, 15],[-1, 0, 0, -21]])
        mydata = [0, 0, 1, 250, 0, 1, 0, 15, -1, 0, 0, -21]
        mymsg = array64()
    #    mymsg.data=mydata.reshape([12])
        mymsg.data = mydata
    #    mymsg.data = ([1.0, 2.0],[3.0, 4.0])
        mymsg.layout.data_offset = 0
        mymsg.layout.dim = [MultiArrayDimension(), MultiArrayDimension()]
        # dim[0] is the vertical dimension of your matrix
        mymsg.layout.dim[0].label = "row"
        mymsg.layout.dim[0].size = 3
        mymsg.layout.dim[0].stride = 12
        # dim[1] is the horizontal dimension of your matrix
        mymsg.layout.dim[1].label = "col"
        mymsg.layout.dim[1].size = 4
        mymsg.layout.dim[1].stride = 4
    else:
        pub = rospy.Publisher('codio', Pose, queue_size=1)
        mymsg = Pose()
        mymsg.position.x = 1
        mymsg.position.y = 1
        mymsg.position.z = 1
        mymsg.orientation.x = 1
        mymsg.orientation.y = 1
        mymsg.orientation.z = 1
        mymsg.orientation.w = 1
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(1) # 1hz
    while not rospy.is_shutdown():
        pub.publish(mymsg)
        rate.sleep()
 def test_dictionary_with_child_message(self):
     from std_msgs.msg import Float64MultiArray, MultiArrayLayout, MultiArrayDimension
     expected_message = Float64MultiArray(layout=MultiArrayLayout(
         dim=[
             MultiArrayDimension(label='Dimension1', size=12, stride=7),
             MultiArrayDimension(label='Dimension2', size=90, stride=8)
         ],
         data_offset=1),
                                          data=[1.1, 2.2, 3.3])
     dictionary = {
         'layout': {
             'dim': [{
                 'label': expected_message.layout.dim[0].label,
                 'size': expected_message.layout.dim[0].size,
                 'stride': expected_message.layout.dim[0].stride
             }, {
                 'label': expected_message.layout.dim[1].label,
                 'size': expected_message.layout.dim[1].size,
                 'stride': expected_message.layout.dim[1].stride
             }],
             'data_offset':
             expected_message.layout.data_offset
         },
         'data': expected_message.data
     }
     message = message_converter.convert_dictionary_to_ros_message(
         'std_msgs/Float64MultiArray', dictionary)
     expected_message = serialize_deserialize(expected_message)
     self.assertEqual(message, expected_message)
 def test_ros_message_with_child_message(self):
     from std_msgs.msg import Float64MultiArray, MultiArrayLayout, MultiArrayDimension
     expected_dictionary = {
         'layout': {
             'dim': [{
                 'label': 'Dimension1',
                 'size': 12,
                 'stride': 7
             }, {
                 'label': 'Dimension2',
                 'size': 24,
                 'stride': 14
             }],
             'data_offset':
             0
         },
         'data': [1.1, 2.2, 3.3]
     }
     dimension1 = MultiArrayDimension(
         label=expected_dictionary['layout']['dim'][0]['label'],
         size=expected_dictionary['layout']['dim'][0]['size'],
         stride=expected_dictionary['layout']['dim'][0]['stride'])
     dimension2 = MultiArrayDimension(
         label=expected_dictionary['layout']['dim'][1]['label'],
         size=expected_dictionary['layout']['dim'][1]['size'],
         stride=expected_dictionary['layout']['dim'][1]['stride'])
     layout = MultiArrayLayout(
         dim=[dimension1, dimension2],
         data_offset=expected_dictionary['layout']['data_offset'])
     message = Float64MultiArray(layout=layout,
                                 data=expected_dictionary['data'])
     message = serialize_deserialize(message)
     dictionary = message_converter.convert_ros_message_to_dictionary(
         message)
     self.assertEqual(dictionary, expected_dictionary)
Example #28
    def take_pictures_test(self):
        #input: None
        #output: None
        #Description: Used to validate that the AI is working with the camera
        
        #Getting the bird's-eye view of the image
        #persImg = perspective(cv_image)
        persImg = skimage.io.imread(INPUT_TEST_IMAGE_PATH)
                 
        objCnt,objCord  = self.weedLocation(persImg)
        #Making up the object location data in format for publishing

        if objCnt !=0:
                objCord = np.reshape(objCord,(objCnt*3,))  #reshaping in the form of single column
                objCord = np.ndarray.tolist(objCord)
        else:
                objCord = np.reshape(objCord,(3,))  #reshaping in the form of single column
                objCord = np.ndarray.tolist(objCord)
       
        msg = Float32MultiArray()
        msg.layout.dim.append(MultiArrayDimension())
        msg.layout.dim.append(MultiArrayDimension())

        msg.layout.dim[0].label = "row"
        msg.layout.dim[1].label = "col"
        msg.layout.dim[0].size = objCnt
        msg.layout.dim[1].size = 1
        msg.layout.dim[0].stride = 3
        msg.layout.dim[1].stride = 1
        msg.layout.data_offset = 0        
        msg.data = objCord
        self.publisher.publish(msg)
Example #29
    def get_example_cov(example_id=0):
        """
        Creates an example covariance matrix. The data in the matrix was acquired from the maven-1.bag file.
        :param example_id: A constant id of the example in the list of stored examples. Minimum value is 0.
        Values above the max value will be set to 0 instead.
        Current maximum value is: 1
        :return: The covariance matrix in format std_msgs/Float64MultiArray
        """
        MAX_SAMPLES = 1
        if example_id > MAX_SAMPLES:
            example_id = 0
        nan = float("NaN")
        examples = []
        # Here, just copy-paste some more data from maven-1.bag so that you can choose which example you want
        examples.append([0.2256878004660412, 0.004743161046803848, nan, nan, nan, 0.4799785723084568, 6.694665570936009e-05, 0.004743161046804125, 0.03333087258142175, nan, nan, nan, 0.0064891656361629295, 0.08962944598353237, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.47997857230845664, 0.006489165636162725, nan, nan, nan, 2.4285886983849885, -0.06696549234373295, 6.694665570941213e-05, 0.08962944598353195, nan, nan, nan, -0.06696549234373322, 1.424665892676366])
        examples.append([0.5758368975539181, -0.10477581457466455, nan, nan, nan, 3.003595362397707, -0.39924730334819275, -0.10477581457466437, 0.24081097327513568, nan, nan, nan, -0.4539729065847597, 1.439606811146181, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 3.003595362397709, -0.45397290658476147, nan, nan, nan, 18.613535338951223, -1.9899796692884495, -0.39924730334819236, 1.4396068111461806, nan, nan, nan, -1.9899796692884442, 10.681110693507879])

        # -----------------
        cov_list = examples[example_id]
        # Create Float64MultiArray similar to those in the bag files
        dim_0 = MultiArrayDimension(label="", size=7, stride=49)
        dim_1 = MultiArrayDimension(label="", size=7, stride=7)
        cov_dim = [dim_0, dim_1]
        cov_layout = MultiArrayLayout(dim=cov_dim, data_offset=0)
        return Float64MultiArray(layout=cov_layout, data=cov_list)
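
The docstring above describes a 7x7 covariance stored row-major in a Float64MultiArray. A minimal sketch of turning the message back into a matrix, assuming numpy; cov_to_matrix is an illustrative name:

import numpy as np

def cov_to_matrix(msg):
    # dim[0] and dim[1] are both size 7, so the flat data is a row-major 7x7 matrix
    # (NaN entries from the bag data are preserved as-is).
    return np.array(msg.data).reshape(msg.layout.dim[0].size, msg.layout.dim[1].size)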
    def __request_callback(self, msg):
        '''
        Callback for a received image: faces are detected and
        added to the global array of people.
        A message is sent with the ids, locations and face widths
        in a multiarray.
        '''
        cv_image = self.bridge.imgmsg_to_cv2(msg, 'rgb8')
        if SHOW_RECEIVED_IMAGE:
            cv2.imwrite('rec_img.png', cv_image)
        ret = self.fr.recognize_people(cv_image)
        simple_msg = Float32MultiArray()
        if ret:
            print('new face detected')
            locs, ids = ret
            for i, loc in zip(ids, locs):
                if i in self.recognized_ids:
                    index = self.recognized_ids.index(i)
                    self.recognized_locs[index] = loc
                    self.recognized_times[index] = datetime.datetime.now()
                else:
                    self.recognized_ids.append(i)
                    self.recognized_locs.append(loc)
                    self.recognized_times.append(datetime.datetime.now())
        else:
            print('No faces detected')

        # Publish the computed info on the simple_people topic
        # as a multiarray. The structure is defined as [id, xpos, ypos, width]
        # in a repeated fashion for multiple faces.
        # The width might be used as a metric for how far the face is away
        dim = MultiArrayDimension()
        dim.label = 'id_and_pos'
        dim.size = 4  # one entry per field: id, x, y, width (see structure comment above)
        dim.stride = 0
        dim2 = MultiArrayDimension()
        dim2.label = 'length_faces'
        dim2.size = len(self.recognized_ids)
        dim2.stride = 0
        simple_msg.layout.dim = [dim, dim2]
        simple_msg.layout.data_offset = 0
        data = []
        # Add the recognized faces around the table to the message
        for i, loc in zip(self.recognized_ids, self.recognized_locs):
            width = self.__get_width(loc)
            x, y, valid = self.__calculate_people_positions(width)
            data = np.concatenate((data, [i, x, y, width]), axis=0)
        simple_msg.data = data
        print(simple_msg)
        # Publish the message
        self.recognition_pub.publish(simple_msg)

        # Check if person is seen in the last TIME_BEFORE_FORGOTTEN seconds and otherwise remove
        # iterate in reverse so deleting entries does not shift indices still to be visited
        for index in reversed(range(len(self.recognized_times))):
            time = self.recognized_times[index]
            if abs((time - datetime.datetime.now()
                    ).total_seconds()) > TIME_BEFORE_FORGOTTEN:
                del self.recognized_ids[index]
                del self.recognized_locs[index]
                del self.recognized_times[index]
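
Per the comment above describing the repeated [id, xpos, ypos, width] structure, a subscriber can step through the published data in groups of four. A minimal sketch under that assumption; simple_people_callback is an illustrative name:

def simple_people_callback(msg):
    # Each face contributes four consecutive values: id, x position, y position, face width.
    faces = []
    for i in range(0, len(msg.data), 4):
        face_id, x, y, width = msg.data[i:i + 4]
        faces.append({'id': int(face_id), 'x': x, 'y': y, 'width': width})
    return faces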