Code example #1
    def publish(self):
        # Compose the multiarray message: one labeled dimension holding
        # one velocity per wheel.
        jointVelocities = Float32MultiArray()
        myLayout = MultiArrayLayout()
        myMultiArrayDimension = MultiArrayDimension()

        myMultiArrayDimension.label = "joint_velocities"
        # size must match the number of elements in this dimension
        myMultiArrayDimension.size = self.numOfWheels
        myMultiArrayDimension.stride = self.numOfWheels

        myLayout.dim = [myMultiArrayDimension]
        myLayout.data_offset = 0
        jointVelocities.layout = myLayout

        if rospy.get_time() - self.lastTwistTime < self.timeout:
            # Differential-drive kinematics: v_{l,r} = v -/+ w * b / 2
            self.right = self.linearVelocity + self.angularVelocity * self.baseWidth / 2
            self.left = self.linearVelocity - self.angularVelocity * self.baseWidth / 2

            rospy.loginfo("wheel velocities vl, vr [{0:.3f},{1:.3f}]".format(self.left, self.right))

            if self.numOfWheels == 2:
                # first item is left and second is right
                jointVelocities.data = [self.left, self.right]
            elif self.numOfWheels == 4:
                jointVelocities.data = [self.left, self.right, self.left, self.right]
        else:
            # No Twist received within the timeout: stop all wheels.
            if self.numOfWheels == 2:
                jointVelocities.data = [0.0, 0.0]
            elif self.numOfWheels == 4:
                jointVelocities.data = [0.0, 0.0, 0.0, 0.0]

        self.joint_velocities_Pub.publish(jointVelocities)
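
Note: in all of these examples, size is the element count along a dimension and stride is the total number of data elements one step of that dimension spans (by the std_msgs convention, dim[i].stride = dim[i].size * dim[i+1].stride, so for a flat 1-D array size and stride both equal the data length). The layout is pure metadata; the values themselves travel in the message's flat data field.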
Code example #2
def callback(msg, prevMarkers):
    detectedMarkers = msg.markers
    # The current time in seconds
    now = rospy.get_time()
    for detectedMarker in detectedMarkers:
        measuredTime = detectedMarker.header.stamp.secs
        markerID = detectedMarker.id
        prevMarkers[markerID] = measuredTime

    detected_markers = list()
    # Iterate over a copy of the keys so entries can be deleted safely.
    for marker in list(prevMarkers.keys()):
        if abs(prevMarkers[marker] - now) > 5:
            # The measurement has been stale for 5 seconds; forget it.
            del prevMarkers[marker]
        else:
            detected_markers.append(marker)

    dim = MultiArrayDimension()
    dim.label = 'numMarkers'
    dim.size = len(detected_markers)
    dim.stride = len(detected_markers)
    layout = MultiArrayLayout()
    layout.dim = [
        dim,
    ]
    layout.data_offset = 0

    out_msg = Int16MultiArray()  # avoid shadowing the incoming msg
    out_msg.layout = layout
    out_msg.data = detected_markers
    pub.publish(out_msg)
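
A hedged sketch of how this callback might be wired up (the topic names and the AlvarMarkers message type are assumptions, not part of the original; rospy.Subscriber really does pass callback_args as the callback's second argument):

import rospy
from std_msgs.msg import Int16MultiArray, MultiArrayLayout, MultiArrayDimension
from ar_track_alvar_msgs.msg import AlvarMarkers  # assumed marker message type

pub = rospy.Publisher('detected_markers', Int16MultiArray, queue_size=1)

if __name__ == '__main__':
    rospy.init_node('marker_tracker')
    prevMarkers = {}  # markerID -> last measurement time (secs), shared across calls
    rospy.Subscriber('ar_pose_marker', AlvarMarkers, callback,
                     callback_args=prevMarkers)
    rospy.spin()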
Code example #3
def gen_layout(shape):
    # Build a MultiArrayLayout for an array of the given shape. By the
    # std_msgs convention, dim[i].stride is the product of the sizes of
    # dimensions i..n-1 (row-major flattening), not a constant 1.
    layout = MultiArrayLayout()
    dims = []
    stride = 1
    for i, size in reversed(list(enumerate(shape))):
        stride *= size
        dims.insert(0, gen_dim('dim{}'.format(i), size, stride))
    layout.dim = dims
    layout.data_offset = 0
    return layout
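
gen_dim itself is not shown in this snippet. A minimal sketch of what it presumably does (name and signature taken from the call above; the body is an assumption):

from std_msgs.msg import MultiArrayDimension

def gen_dim(label, size, stride):
    # Package one labeled dimension of a MultiArrayLayout.
    dim = MultiArrayDimension()
    dim.label = label
    dim.size = size
    dim.stride = stride
    return dim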
Code example #4
def create_depth_msg(depth_img):
    command = Float64MultiArray()
    layout = MultiArrayLayout()
    dimension = MultiArrayDimension()
    dimension.label = "depth_img"
    dimension.size = len(depth_img)
    dimension.stride = len(depth_img)
    layout.data_offset = 0
    layout.dim = [dimension]
    command.layout = layout
    command.data = depth_img
    return command
Code example #5
File: Sensor_fusion.py  Project: Niknu/Thesis_2020
    def init_multdata(self):
        msg = MultiArrayLayout()

        msg.data_offset = 0

        msg.dim = [MultiArrayDimension()]

        msg.dim[0].label = "state_estimation"
        msg.dim[0].size = 15
        msg.dim[0].stride = 15

        return msg
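
The helper returns only the layout; a hedged usage sketch (the Float64MultiArray message, the fusion instance, and state_pub are assumptions, not taken from Sensor_fusion.py):

from std_msgs.msg import Float64MultiArray

state_msg = Float64MultiArray()
state_msg.layout = fusion.init_multdata()  # fusion: instance of the class above (assumed)
state_msg.data = [0.0] * 15  # 15 flattened state-estimate values
state_pub.publish(state_msg)  # state_pub: an assumed rospy.Publisher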
Code example #6
File: ES-EKF.py  Project: Niknu/Thesis_2020
    def init_multdata_NEW(self, data):
        msg = MultiArrayLayout()

        msg.data_offset = 0

        msg.dim = [MultiArrayDimension()]

        msg.dim[0].label = "state_estimation"
        msg.dim[0].size = data.shape[0]
        msg.dim[0].stride = data.shape[0]

        return msg
Code example #7
File: Madgwick_ROS.py  Project: Niknu/Thesis_2020
    def init_multdata(self):
        msg = MultiArrayLayout()

        msg.data_offset = 0

        msg.dim = [MultiArrayDimension()]

        msg.dim[0].label = "Quat"
        msg.dim[0].size = 4
        msg.dim[0].stride = 4

        return msg
Code example #8
def publish(vals):
    command = Float64MultiArray()
    layout = MultiArrayLayout()
    dimension = MultiArrayDimension()
    dimension.label = "keyboard_control"
    dimension.size = 4
    dimension.stride = 4
    layout.data_offset = 0
    layout.dim = [dimension]
    command.layout = layout
    command.data = vals
    control_pub.publish(command)
Code example #9
    def __init__(self, arg, rot):

        # init_node must run before any publisher is created.
        rospy.init_node('joint_velocities', anonymous=True)
        rospy.on_shutdown(self.shutdown)

        self.pub = rospy.Publisher('joint_velocities',
                                   Float32MultiArray,
                                   queue_size=5)

        self.rate = rospy.Rate(20)
        self.wheels = arg

        # direction multiplier: -1 when rotating
        if rot:
            r = -1
        else:
            r = 1

        # compose the multiarray message
        self.jointVelocities = Float32MultiArray()
        myLayout = MultiArrayLayout()
        myMultiArrayDimension = MultiArrayDimension()

        myMultiArrayDimension.label = "joint_velocities"
        myMultiArrayDimension.size = self.wheels
        myMultiArrayDimension.stride = self.wheels

        myLayout.dim = [myMultiArrayDimension]
        myLayout.data_offset = 0
        self.jointVelocities.layout = myLayout

        while not rospy.is_shutdown():
            # first item is left and second is right
            if self.wheels == 2:
                self.jointVelocities.data = [1.0, 1.0 * r]
            elif self.wheels == 4:
                self.jointVelocities.data = [1.0, 1.0, 1.0 * r, 1.0 * r]

            self.pub.publish(self.jointVelocities)
            self.rate.sleep()
Code example #10
    def build_atg(self, obs_save_dir=None):

        time_elapsed = time.time()  # wall-clock start time

        action = None
        first_node_found = False
        reward_s_a = {}
        self.reward_a = 1e-8 * np.ones(NUM_ACTIONS)
        while not rospy.is_shutdown():
            self.rate.sleep()
            ros_image = self.get_current_observation().img
            print('Done observing!!')
            cv_image = self.bridge.imgmsg_to_cv2(ros_image)
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
            if NEED_CROP:
                cv_image = cv_image[self.c_coord_tl[1]:self.c_coord_br[1], self.c_coord_tl[0]:self.c_coord_br[0]]
            if COLLECT_DATA:
                if self.observation_count > NUM_DATA_POINTS:
                    break
                if obs_save_dir is not None:
                    obs_save_dir_full = self.data_folder_path + obs_save_dir + '/dataset/'
                    if not os.path.exists(obs_save_dir_full):
                        os.makedirs(obs_save_dir_full)
                    writing_path = 'obs_' + str(self.observation_count) + '.jpg'
                    write_loc = os.path.join(obs_save_dir_full, writing_path)
                    cv2.imwrite(write_loc, cv_image)
                action = random_action()
                self.action_pub.publish(ACTION_PARAMETER_SPACE[action])
                print('Taking action %d' % action)
                self.rate.sleep()
                print('Done taking action')
                self.observation_count += 1
            else:

                image = to_var(cv_to_tensor(cv_image, image_size=IMAGE_SIZE))
                #recon_loss, recon_loss_norm = get_reconstruction_loss_with_all_ae(image,
                #                                                     self.autoencoder_mixture,
                #                                                     loss_fn = torch.nn.functional.mse_loss)

                #c_aspect_node = current_aspect_node(recon_loss, self.aspect_count)
                recon_ll = get_recon_likelihood_with_all_ae(image, self.autoencoder_mixture)
                c_aspect_node_blief = belief_from_recon_ll(recon_ll)
                c_aspect_node = current_aspect_node_ll(recon_ll)
                print('recon_ll: ', recon_ll)
                #plt.bar(np.arange(len(c_aspect_node_blief)), c_aspect_node_blief)

                #plt.figure()
                #imshow(make_grid(image.cpu().data), True)
                if obs_save_dir is not None:
                    obs_save_dir_full = self.data_folder_path + obs_save_dir + '/' + str(c_aspect_node) + '/'
                    if not os.path.exists(obs_save_dir_full):
                        os.makedirs(obs_save_dir_full)
                    writing_path = 'obs_' + str(self.observation_count) + '.jpg'
                    write_loc = os.path.join(obs_save_dir_full, writing_path)

                    cv2.imwrite(write_loc, cv_image)
                    obs_save_dir_full = self.data_folder_path + obs_save_dir + '/dataset/'
                    if not os.path.exists(obs_save_dir_full):
                        os.makedirs(obs_save_dir_full)
                    writing_path = 'obs_' + str(self.observation_count) + '.jpg'
                    write_loc = os.path.join(obs_save_dir_full, writing_path)
                    cv2.imwrite(write_loc, cv_image)
                atg_mat_fma = Float64MultiArray()
                if np.max(recon_ll) > RECONSTRUCTION_TOLERANCE_LL:
                    print('[Observation: %d was matched with Aspect node: %d]' % (self.observation_count, c_aspect_node))
                    #c_aspect_node_blief = belief_from_recon_loss(recon_loss)
                    self.time_last_aspect_discoverd += 1
                    # Converged: no new aspect discovered for
                    # CONVERGENCE_THRESHOLD consecutive matched observations.
                    if self.time_last_aspect_discoverd > CONVERGENCE_THRESHOLD:
                        break
                else:
                    # Unmatched observation: reset the convergence counter
                    # and train an autoencoder for the new aspect node.
                    self.time_last_aspect_discoverd = 0
                    print('Observation: %d not matched, creating aspect_%d' % (self.observation_count, self.aspect_count))
                    print()
                    self.aspect_images.append(tensor_to_ros(image.cpu().data, self.bridge))

                    self.autoencoder_mixture[self.aspect_count] = {}
                    self.autoencoder_mixture[self.aspect_count]['autoencoder'] = init_autoencoder()
                    gen_images = generate_random_versions_of_image(image.cpu().squeeze(0), random_transformer, n_versions=N_VERSIONS)
                    if VIZ_GEN_IMAGES:
                        for i in range(len(gen_images)):
                            imshow(make_grid(gen_images[i]), True)
                    if USE_ASPECT_IMAGE:
                        ds = AutoEncoderDataset(gen_images, aspect_image=image)
                    else:
                        ds = AutoEncoderDataset(gen_images, aspect_image=None)

                    optimizer = optim.Adam(self.autoencoder_mixture[self.aspect_count]['autoencoder'].parameters(), lr=1e-3)
                    criterion = nn.BCELoss()
                    data_loader = DataLoader(ds, batch_size=4, shuffle=True)

                    train_autoencoder(self.autoencoder_mixture[self.aspect_count]['autoencoder'],
                                      optimizer,
                                      criterion,
                                      data_loader,
                                      number_of_epochs=NUMBER_OF_EPOCHS,
                                      name='aspect_autoencoder_' + str(self.aspect_count), verbose=VERBOSE)
                    recon_images = self.autoencoder_mixture[self.aspect_count]['autoencoder'](to_var(gen_images)).cpu().data
                    self.autoencoder_mixture[self.aspect_count]['stat'] = stat_of_mse_loss(recon_images, gen_images)

                    #test_image = to_var(gen_images[0].unsqueeze(0))
                    #test_image_recon = self.autoencoder_mixture[self.aspect_count]['autoencoder'](test_image)

                    #recon_error = torch.nn.functional.mse_loss(test_image_recon, test_image)
                    #self.autoencoder_mixture[self.aspect_count]['recon_error'] = recon_error.data.sum()
                    self.autoencoder_mixture[self.aspect_count]['image'] = image
                    #recon_loss, recon_loss_norm = get_reconstruction_loss_with_all_ae(image,
                    #                                                 self.autoencoder_mixture,
                    #                                                 loss_fn = torch.nn.functional.mse_loss)
                    #c_aspect_node_blief = belief_from_recon_loss(recon_loss)
                    recon_ll = get_recon_likelihood_with_all_ae(image, self.autoencoder_mixture)
                    c_aspect_node_blief = belief_from_recon_ll(recon_ll)
                    #rand_img = random_image(self.data_folder_path + obs_save_dir + '/aspect_nodes/')

                    #if rand_img is not None:
                    #    rand_img_recon = self.autoencoder_mixture[self.aspect_count]['autoencoder'](rand_img)
                        #imshow(make_grid(rand_img.cpu().data), True)
                    #    fig=plt.figure(figsize=(18, 16), dpi= 100, facecolor='w', edgecolor='k')
                    #    viz = torch.stack([test_image, test_image_recon, rand_img, rand_img_recon]).squeeze(1).cpu().data
                    #    if VIZ_GEN_IMAGES:
                    #        imshow(make_grid(viz), True)

                    if obs_save_dir is not None:
                        obs_save_dir_full = self.data_folder_path + obs_save_dir + '/aspect_nodes/'
                        if not os.path.exists(obs_save_dir_full):
                            os.makedirs(obs_save_dir_full)
                        writing_path = 'aspect_' + str(self.aspect_count) + '.jpg'
                        write_loc = os.path.join(obs_save_dir_full, writing_path)

                        cv2.imwrite(write_loc, cv_image)
                    self.aspect_count += 1

                if action is not None:
                    #print(len(self.prev_aspect_node_belief), len(c_aspect_node_blief))
                    #print('ATG BEFORE: ', self.atg, c_aspect_node_blief)
                    for s in range(len(self.prev_aspect_node_belief)):
                        for s_prime in range(len(c_aspect_node_blief)):
                            atg_entry_key = (s, action, s_prime)

                            self.atg_mat = atg_dict_to_mat(self.atg, len(c_aspect_node_blief), NUM_ACTIONS)

                            if first_node_found:
                                # Transition entropy before this update.
                                H_t = entropy_from_belief(self.atg_mat[s, action, :])

                            if atg_entry_key in self.atg.keys():
                                self.atg[atg_entry_key] += self.prev_aspect_node_belief[s] * c_aspect_node_blief[s_prime]
                            else:
                                reward_s_a[(s, action)] = {}
                                reward_s_a[(s, action)]['queue'] = [1.]
                                reward_s_a[(s, action)]['mean'] = None
                                reward_s_a_mat = np.ones((len(self.prev_aspect_node_belief), NUM_ACTIONS))
                                self.atg[atg_entry_key] = self.prev_aspect_node_belief[s] * c_aspect_node_blief[s_prime]

                            self.atg_mat = atg_dict_to_mat(self.atg, len(c_aspect_node_blief), NUM_ACTIONS)

                            if first_node_found:
                                H_t_1 = entropy_from_belief(self.atg_mat[s, action, :])
                                delta_H = H_t_1 - H_t
                                queue, running_mean = update_reward(reward_s_a[(s, action)]['queue'], delta_H)
                                reward_s_a[(s, action)]['queue'] = queue
                                reward_s_a[(s, action)]['mean'] = running_mean
                                reward_s_a_mat = reward_dict_to_mat(reward_s_a, len(self.prev_aspect_node_belief))
                                #print(s, action, running_mean)
                                #print(delta_H)
                    #print('ATG AFTER: ', self.atg)
                    if not first_node_found:
                        first_node_found = True

                    print('Time since new aspect node discovered: ', self.time_last_aspect_discoverd,
                          '   Number of Aspect nodes: ', self.aspect_count,
                          '   Time (secs): ', time.time() - time_elapsed)

                    self.reward_a = np.zeros(len(ACTION_PARAMETER_SPACE))
                    for act in range(len(ACTION_PARAMETER_SPACE)):
                        #print('reward: ', reward_s_a_mat[:, act])
                        self.reward_a[act] = (self.prev_aspect_node_belief*abs(reward_s_a_mat[:, act])).sum()
                    #self.atg_mat = atg_dict_to_mat(self.atg, self.aspect_count, len(ACTION_PARAMETER_SPACE))
                    #print(self.atg_mat)
                    atg_mat_layout = MultiArrayLayout()
                    dim1 = MultiArrayDimension()
                    dim2 = MultiArrayDimension()
                    dim3 = MultiArrayDimension()
                    dim1.label = 's'
                    dim1.size  = self.atg_mat.shape[0]

                    dim2.label = 'a'
                    dim2.size  = self.atg_mat.shape[1]

                    dim3.label = 's_prime'
                    dim3.size  = self.atg_mat.shape[2]

                    dims = [dim1, dim2, dim3]
                    atg_mat_layout.dim = dims
                    atg_mat_layout.data_offset = 0

                    atg_mat_fma.layout = atg_mat_layout
                    atg_mat_fma.data = self.atg_mat.flatten().tolist()  # row-major flattening

                aspect_nodes = AspectNodes()
                aspect_nodes.data = self.aspect_images

                self.aspect_nodes_pub.publish(aspect_nodes)
                self.atg_pub.publish(atg_mat_fma)

                self.observation_count += 1
                self.prev_aspect_node = c_aspect_node
                self.prev_aspect_node_belief = c_aspect_node_blief
                if ACTION_SELECTION_MODE == 'RANDOM':
                    action = random_action()
                elif ACTION_SELECTION_MODE == 'IM':
                    action = im_action(self.reward_a)
                else:
                    print('UNKNOWN ACTION SELECTION!!')
                self.action_pub.publish(ACTION_PARAMETER_SPACE[action])
                print('Taking action %d' % action)
                self.rate.sleep()
                print('Done taking action')
        if not COLLECT_DATA:
            print('ATG took %.2f secs to discover %d Aspect Nodes and converge' % (time.time() - time_elapsed, self.aspect_count))
            result = {}
            result['autoencoder_mixture'] = self.autoencoder_mixture
            result['atg'] = self.atg_mat
            result['running_time'] = time.time()-time_elapsed
            result['n_aspect_nodes'] = self.aspect_count
            return result
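
The 3-D ATG tensor (s, a, s_prime) is flattened row-major before publishing. A subscriber can recover it from the labeled dimensions; a minimal sketch, assuming numpy and the layout built above:

import numpy as np

def atg_from_msg(msg):
    # Recover the (s, a, s_prime) tensor from the flat data using the
    # sizes recorded in the layout.
    shape = tuple(d.size for d in msg.layout.dim)
    return np.asarray(msg.data, dtype=np.float64).reshape(shape)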
Code example #11
                    scan.range_max = MAX_LASER_RANGE
                scan.angle_increment = angle_step
                scan.time_increment = 0.0
                scan.ranges = simulated_laser
                scan_pub.publish(scan)

                #### publish the costmap
                dim1 = MultiArrayDimension()
                dim1.label = "height"
                dim1.size = GRID_HEIGHT
                dim1.stride = GRID_HEIGHT * GRID_WIDTH
                dim2 = MultiArrayDimension()
                dim2.label = "width"
                dim2.size = GRID_WIDTH
                dim2.stride = GRID_WIDTH
                dimensions = [dim1, dim2]
                layout = MultiArrayLayout()
                layout.dim = dimensions
                layout.data_offset = 0
                array = Float64MultiArray()
                array.layout = layout
                array.data = local_costmap.flatten().tolist()
                costmap_pub.publish(array)

                seq_num += 1
                # print(simulated_laser)

        rate.sleep()

    # rospy.spin()
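
With this layout, the costmap element at row i, column j lives at data[data_offset + dim[1].stride * i + j], i.e. data[i * GRID_WIDTH + j]; dim[0].stride is the total element count GRID_HEIGHT * GRID_WIDTH, matching the std_msgs stride convention.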