    def __init__(self):
        """
         Load your agent here and initialize anything needed
         WARNING: any path to files you wish to access on the docker should be ABSOLUTE PATHS
        """

        # functional modules
        self.perception = Perception(
            self
        )  # generate high-level perception based on current observation
        self.strategy = Strategy(
            self
        )  # implement strategy to chase the food given the high-level perception
        self.action_state_machine = ActionStateMachine(
            self)  # use a finite state machine to help implement the strategy
        self.chaser = Chaser(self)  # responsible for chasing the food

        # primitive perception
        self.obs_visual = None  # visual input in RGB space, float numpy array of shape (84, 84, 3)
        self.obs_visual_hsv = None  # visual input in HSV space, float numpy array of shape (84, 84, 3)
        self.obs_vector = None  # speed vector (left-right, up-down, forward-backward), float numpy array of shape (3,)
        self.done = None  # if the current test is done, bool
        self.reward = None  # current reward from the env, float
        self.info = None  # the brainInfo object from the env
        self.t = None  # total number of steps in this test, int
        self.step_n = None  # the current time step, int

        # high-level perceptions
        # Though the environment is 3D, all representations here live in the 2D image plane.
        self.is_green = None  # bool numpy array of shape (84, 84), to indicate if each pixel is green (food color)
        self.is_brown = None  # bool numpy array of shape (84, 84), to indicate if each pixel is brown (food color)
        self.is_red = None  # bool numpy array of shape (84, 84), to indicate if each pixel is red (danger color)
        self.is_blue = None  # bool numpy array of shape (84, 84), to indicate if each pixel is blue (sky color)
        self.is_gray = None  # bool numpy array of shape (84, 84), to indicate if each pixel is gray (wall color)
        self.target_color = None  # the color the agent is currently looking for, either brown or green
        self.is_inaccessible = None  # bool numpy array of shape (84, 84), to indicate if each pixel is inaccessible
        # because it might be a wall pixel, sky pixel or a dangerous pixel.
        self.is_inaccessible_masked = None  # bool numpy array of shape (84, 84), same as is_inaccessible
        # but without the pixels on the four borders being forcibly marked accessible.
        self.reachable_target_idx = None  # int numpy array of shape (2,), where the target is in the visual input
        self.reachable_target_size = None  # int, how many pixels the target covers
        self.nearest_inaccessible_idx = None  # int numpy array of shape (2,), the nearest inaccessible pixel

        # memory: queues that store the most recent observations
        self.is_green_memory = queue.Queue(maxsize=AgentConstants.memory_size)
        self.is_brown_memory = queue.Queue(maxsize=AgentConstants.memory_size)
        self.is_red_memory = queue.Queue(maxsize=AgentConstants.memory_size)
        self.vector_memory = queue.Queue(maxsize=AgentConstants.memory_size)

        # the action that will be returned to the env
        self.current_action = None

        # strategy-related variables
        self.pirouette_step_n = None  # int, how long the agent has stayed in place while rotating
        self.target_color = None  # the color that the agent is looking for, either "green" or "brown"
        self.exploratory_direction = None  # int, if nowhere to go, go in this direction
        self.not_seeing_target_step_n = None  # int, how long the agent has gone without seeing a target
        self.chase_failed = None  # bool, if it failed to reach a target because path planning failed
        self.search_direction = None  # either left or right, to start searching for the target
        self.static_step_n = None  # int, how long the agent's position hasn't changed
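
For orientation, a minimal sketch of how these modules might be wired together each step. The step signature and the perceive/run_strategy method names are assumptions for illustration; only the attribute names come from the snippet above.

    def step(self, obs, reward, done, info):
        # hypothetical per-step wiring: refresh primitive perception, derive
        # the high-level perception, then let the strategy pick an action
        self.obs_visual, self.obs_vector = obs
        self.reward, self.done, self.info = reward, done, info
        self.perception.perceive()      # assumed Perception API
        self.strategy.run_strategy()    # assumed Strategy API; sets current_action
        return self.current_action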
Example #2
    def __init__(self, env):
        super(SafetyEnvelope, self).__init__(env)

        # Grab configuration
        self.config = cg.Configuration.grab()

        # Action proposed by the agent
        self.proposed_action = None

        # Action proposed by the monitor
        self.shaped_action = None

        # List of all monitors with their states, rewards and unsafe-actions
        self.meta_monitor = []

        # Dictionary that gets populated with information by all the monitors at runtime
        self.monitor_states = {}

        # Perception of the agent; updated at each step with the current observations
        self.perception = Perception(env.gen_obs_decoded())

        # Set rewards
        self.step_reward = self.config.rewards.standard.step
        self.goal_reward = self.config.rewards.standard.goal
        self.death_reward = self.config.rewards.standard.death
Example #3
def main():
    global block_t, starfish_t

    cv2.namedWindow('image')

    perception_obj = Perception()

    # Make trackbars, with default values above
    cv2.createTrackbar('Block Threshold', 'image', block_t, 255, nothing)
    cv2.createTrackbar('Starfish Threshold', 'image', starfish_t, 255, nothing)
    print("Showing video. With the CV window in focus, press q to exit, p to pause.")
    while True:

        # get current positions of trackbars
        block_t = cv2.getTrackbarPos('Block Threshold', 'image')
        starfish_t = cv2.getTrackbarPos('Starfish Threshold', 'image')

        perception_obj.block_t = block_t
        perception_obj.starfish_t = starfish_t

        scene = perception_obj.get_all_targets()
        img = perception_obj.frame
        img = label_scene(img, scene)
        if img is not None:
            cv2.imshow('image', img)
            key = cv2.waitKey(66)  # Delay for 66 ms
            if key == ord('q'):  # Press q to exit, p to pause
                break
            if key == ord('p'):
                cv2.waitKey(-1)  # wait until any key is pressed

    cv2.destroyAllWindows()
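
The trackbar setup above passes a nothing callback defined elsewhere in the script; cv2.createTrackbar requires an onChange callback that receives the new slider position, even though this loop polls values with cv2.getTrackbarPos. A minimal no-op version:

def nothing(x):
    # no-op onChange callback; trackbar values are polled in the main loop
    pass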
Example #4
    def init_perception(self):
        """Instantiates all perceptual modules.

        Each perceptual module should have a perceive method that is
        called by the base agent event loop.
        """
        if not hasattr(self, "perception_modules"):
            self.perception_modules = {}
        self.perception_modules["self"] = SelfPerception(self)
        self.perception_modules["vision"] = Perception(self, self.opts.perception_model_dir)
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--player", help="player 1-6", type=int, default=1)
    args = parser.parse_args()

    config = Config(CONFIG_FILE, args.player)

    comms_config = {
        'rx_ip': config.client_ip,
        'rx_port': config.client_port,
        'tx_ip': config.sim_ip,
        'tx_port': config.sim_port
    }
    print("Rx at {}:{}".format(comms_config["rx_ip"], comms_config["rx_port"]))
    print("Tx to {}:{}".format(comms_config["tx_ip"], comms_config["tx_port"]))

    commands_mutex = Lock()

    # Launch comms in background thread
    comms = CommsThread(comms_config, False, commands_mutex)
    comms.daemon = True
    comms.start()

    # Launch perception, motion planning, and controls in main thread
    sweep_builder = SweepBuilder()
    perception = Perception(config)
    planning = Planning(config)
    controls = Controls(config)
    visualize = Visualize(config)

    try:
        while True:
            vehicle_state = sweep_builder.run(comms.get_vehicle_states())
            if vehicle_state is not None:
                t1 = time.time()

                world_state = perception.run(vehicle_state)
                plan = planning.run(world_state)
                vehicle_commands = controls.run(plan)

                t2 = time.time()
                freq = 1 / (t2 - t1)
                print(f"Running at {freq} Hz")

                vehicle_commands['draw'] = visualize.run(world_state, plan)
                with commands_mutex:
                    # hold the lock to prevent the Comms thread from
                    # sending the commands dict while we're modifying it
                    comms.vehicle_commands.update(vehicle_commands)
    except KeyboardInterrupt:
        pass
Example #6
    def __init__(self, config):
        self.config = config
        self._nav_pose_list, self._nav_pose_dict = get_nav_pose(
            self.config.poses_file_path)

        self._memory = Memory(self)
        self._ear = Ear(self)
        self._leg = Leg(self)
        self._arm = Arm(self)
        self._perception = Perception(self)
        self._mouth = Mouth(self)

        self.last_poses = []
        self.find_obj_times = defaultdict(int)
Example #7
    def __init__(self):
        rospy.init_node('action')
        # Fetch pre-built action matrix. This is a 2D numpy array where row
        # indexes correspond to the starting state and column indexes to the
        # next state.
        #
        # A value of -1 indicates that it is not possible to get to the next
        # state from the starting state. Values 0-8 correspond to the action
        # needed to go to the next state.
        #
        # e.g. self.action_matrix[0][12] = 5 means action 5 moves the robot
        # from state 0 to state 12.
        self.action_matrix = np.loadtxt(path_prefix + "action_matrix.txt")

        # Fetch actions. These are the only 9 possible actions the system can take.
        # self.actions is an array of dictionaries where the row index corresponds
        # to the action number, and the value has the following form:
        # { dumbbell: "red", block: 1}
        colors = ["red", "green", "blue"]
        self.actions = np.loadtxt(path_prefix + "actions.txt")
        self.actions = list(
            map(lambda x: {
                "dumbbell": colors[int(x[0])],
                "block": int(x[1])
            }, self.actions))

        # Load converged q_matrix from output file in 2-D array format
        self.q_matrix_path = os.path.join(os.path.dirname(__file__),
                                          '../output/q_matrix.csv')
        self.q_matrix = np.loadtxt(self.q_matrix_path, delimiter=',')

        # Use converged q_matrix to find optimal sequence of actions
        self.action_seq = []
        self.get_actions()

        # Identifies current action from 0-2, transition occurs when dumbbell is placed
        self.curr_action = 0

        # Identifies distance of closest object within a 10 degree arc in front of the robot
        self.distance = 100
        # Bring in tools from other libraries/files
        self.p = Perception()
        self.cmd_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
        self.scan_sub = rospy.Subscriber("/scan", LaserScan,
                                         self.scan_distance)
        self.move_group_arm = moveit_commander.MoveGroupCommander("arm")
        self.move_group_gripper = moveit_commander.MoveGroupCommander(
            "gripper")

        # Record state of arm (lowered or raised)
        self.arm_dropped = True
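
Given the action-matrix conventions documented above (-1 for an unreachable transition, otherwise the action index), a small hypothetical helper for reading it could look like this; action_between is not part of the original code:

def action_between(action_matrix, state_from, state_to):
    """Return the action index that moves state_from to state_to, or None."""
    a = int(action_matrix[state_from][state_to])
    return None if a == -1 else a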
Example #8
    def __init__(self, parser, perception_func):

        self.parser = parser
        self.perception = Perception(perception_func)
        self.pddl = self.parser
        self.goal_tracking = GoalTracking(self.parser, self.perception)
        self.problem_generator = ProblemGenerator(self.perception, self.parser,
                                                  "tmp_problem_generation")

        self.valid_actions = TrackedSuccessorValidActions(
            self.pddl, self.problem_generator, self.goal_tracking)

        self.on_action_observers = [
            self.goal_tracking.on_action, self.valid_actions.on_action,
            self.perception.on_action
        ]
Example #9
def drive():
    camera = request.args.get("camera")
    model = request.args.get("model")
    record = request.args.get("record")

    recordings_dir = None
    if record is not None:
        processor = StoreFileProcessor()
        automatic_controller.processors.append(processor)
        recordings_dir = processor.output_folder

    global perception
    if perception is not None:
        perception.shutdown()
    perception = Perception(camera, model)
    automatic_controller.start(perception)
    return render_template('drive.html', recordings_dir=recordings_dir)
Example #10
    def __init__(self, parser, perception_func, planner=None):

        self.parser = parser
        self.planner = planner or make_plan
        self.perception = Perception(perception_func)
        self.pddl = self.parser
        self.goal_tracking = GoalTracking(self.parser, self.perception)
        self.problem_generator = ProblemGenerator(self.perception, self.parser,
                                                  "tmp_problem_generation")

        self.valid_actions = ValidActions(self.pddl, self.perception,
                                          self.problem_generator,
                                          self.goal_tracking)

        self.on_action_observers = [
            self.goal_tracking.on_action, self.valid_actions.on_action,
            self.perception.on_action
        ]
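
Both this example and Example #8 collect the same on_action observers. A hedged sketch of how such a list is typically dispatched after each executed action; the method name and argument shape are assumptions:

    def on_action(self, action):
        # notify goal tracking, valid-action bookkeeping, and perception in turn
        for observer in self.on_action_observers:
            observer(action)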
Example #11
        async def on_start(self):
            self.terminate = False
            self.SUCCESS = False
            self.verbose = False

            # Initialization
            self.beliefs = WorldModel()  # B := B0; Initial Beliefs
            self.goals = self.beliefs.current_world_model.goals
            self.intentions = self.beliefs.current_world_model.goals  # I := I0; Initial Intentions
            self.htn_planner = HierarchicalTaskNetworkPlanner(self.beliefs)
            self.perception = Perception(self.beliefs)
            self.coordination = Coordination(self.beliefs)
            self.monitoring = Monitoring()

            self.what, self.why, self.how_well, self.what_else, self.why_failed = "", "", "", "", ""
            self.plans = []
            self.selected_plan = []
            self.percept = {}
            self.action = ""
            self.start_time = datetime.datetime.now()
Example #12
        async def on_start(self):

            self.terminate = False
            self.SUCCESS = False
            self.verbose = False
            # Initialization
            self.htn_planner = HierarchicalTaskNetworkPlanner()
            self.goal = [('transfer_target_object_to_container', 'arm',
                          'target_object', 'table', 'container')]
            self.intentions = self.goal  # I := I0; Initial Intentions
            self.beliefs = WorldModel()  # B := B0; Initial Beliefs
            self.monitoring = Monitoring()
            self.perception = Perception()
            self.coordination = Coordination()

            # Configure the three coordination switches for testing
            self.coordination.control.send_requests = True
            self.coordination.control.center_init = False
            self.coordination.control.detect_last_position = True

            self.what, self.why, self.how_well, self.what_else, self.why_failed = "", "", "", "", ""
            self.plans = []
            self.start_time = datetime.datetime.now()
Example #13
def train_and_perception():
    p = Perception(2, f)  # two inputs, activation function f
    input_vecs, labels = get_training_dataset()
    p.train(input_vecs, labels, 10, 0.1)  # 10 training iterations, learning rate 0.1
    return p
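
A hypothetical usage of the returned model, assuming the Perception class also exposes a predict method:

if __name__ == '__main__':
    perception = train_and_perception()
    print(perception.predict([1, 1]))  # assumed predict API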
Example #14
        stopwords = fileutils.get_stopwords(argv[2])
        tfidf_generate_features.generate_features(argv[1], features_save_path,
                                                  dictionary_save_path,
                                                  stopwords)

    print("Starting training ..")

    # get the names for the sets
    set_names = fileutils.get_documents_by_path(features_save_path)
    dictionary_size = fileutils.get_dictionary_size(dictionary_save_path +
                                                    "dictionary")
    avg_precision, avg_recall, avg_f1 = 0, 0, 0

    for i in range(len(set_names)):

        p = Perception(0.25, dictionary_size)  # learning rate 0.25

        print("=" * 10, "EPOCH", i + 1, "=" * 10)

        # leave one set out for testing, train on the rest
        training_set_names = set_names[:]
        test_set_names = [training_set_names.pop(i)]
        training_set = fileutils.get_data_set(features_save_path,
                                              training_set_names,
                                              dictionary_size)
        test_set = fileutils.get_data_set(features_save_path, test_set_names,
                                          dictionary_size)

        print("Training sets:", training_set_names)
        print("Test set:", test_set_names)
Example #15
    def setUp(self):
        self.config = Mock()
        self.config.field_elements = [Polygon(make_square_vertices(side_length=2, center=(-5, -5)))]
        self.config.ball_radius = BALL_RADIUS
        self.perception = Perception(self.config)
Example #16
    df = pd.read_csv('Data.txt')
    # print(df.tail())
    y = df['Species'].values
    # y = df.iloc[0:100, 4].values
    y = np.where(y == 'setosa', -1, 1)
    X = df.iloc[:, [0, 2]].values  # column 0 is sepal length, column 2 is petal length
    plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
    plt.scatter(X[50:100, 0],
                X[50:100, 1],
                color='blue',
                marker='o',
                label='versicolor')
    plt.xlabel('sepal length')
    plt.ylabel('petal length')
    plt.legend(loc='upper left')
    plt.show()

    # start training
    ppn = Perception(eta=0.1, n_iter=10)
    ppn.fit(X, y)
    plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
    plt.xlabel('Epochs')
    plt.ylabel('Number of misclassifications')
    plt.show()

    # plot the decision boundary
    plot_decision_region(X, y, classifier=ppn)
    plt.xlabel('sepal length [cm]')
    plt.ylabel('petal length [cm]')
    plt.legend(loc='upper left')
    plt.show()
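
The Perception class driving this script is not shown. Below is a minimal sketch compatible with the calls above (an eta/n_iter constructor, a fit method that appends the number of misclassifications per epoch to errors_, and a predict method), implementing the classic perceptron update w <- w + eta * (y - y_hat) * x; treat the details as assumptions:

import numpy as np

class Perception:
    def __init__(self, eta=0.01, n_iter=10):
        self.eta = eta          # learning rate
        self.n_iter = n_iter    # number of passes over the training set

    def fit(self, X, y):
        self.w_ = np.zeros(1 + X.shape[1])  # w_[0] is the bias term
        self.errors_ = []
        for _ in range(self.n_iter):
            errors = 0
            for xi, target in zip(X, y):
                update = self.eta * (target - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)  # misclassifications this epoch
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        return np.where(self.net_input(X) >= 0.0, 1, -1)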
Example #17

if __name__ == '__main__':

    terminate = False
    SUCCESS = False
    verbose = False

    # Initialization
    htn_planner = HierarchicalTaskNetworkPlanner()
    goal = [('transfer_target_object_to_container', 'arm', 'target_object',
             'table', 'container')]
    intentions = goal  # I := I0; Initial Intentions
    beliefs = WorldModel()  # B := B0; Initial Beliefs
    monitoring = Monitoring()
    perception = Perception()
    coordination = Coordination()
    # Configure the three coordination switches for testing
    coordination.control.send_requests = True
    coordination.control.center_init = False
    coordination.control.detect_last_position = True

    what, why, how_well, what_else, why_failed = "", "", "", "", ""
    start_time = datetime.datetime.now()
    """
        Agent control loop version 7 (Intention Reconsideration)
        I := I0; Initial Intentions
        B := B0; Initial Beliefs
        while true do
          get next percept ρ; #  OBSERVE the world
          B:= brf(B, ρ); #  Belief revision function
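
The truncated docstring above sketches the classic BDI control loop with intention reconsideration. Here is a hedged sketch of one iteration using the objects initialized in this example; every method name on Perception, WorldModel, HierarchicalTaskNetworkPlanner, and Coordination below is an assumption for illustration, not the repository's actual API:

while not terminate:
    percept = perception.get_percept()          # assumed API: OBSERVE the world
    beliefs = beliefs.belief_revision(percept)  # assumed API: B := brf(B, p)
    plans = htn_planner.get_plans(beliefs, intentions)  # assumed planner API
    if plans:
        selected_plan = plans[0]  # naive plan selection, illustration only
        for action in selected_plan:
            coordination.execute_action(action)  # assumed actuator API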
Example #18
    def perceives(self, environment):
        return Perception(environment.get_is_dirty(),
                          environment.get_location())
Example #19
# -*- coding: utf-8 -*-
# Call the Perception module
import torch
from perception import Perception
perception = Perception(2, 3, 2)
for name, parameter in perception.named_parameters():
    print(name, parameter)
data = torch.rand(4, 2)
output = perception(data)
print(output)
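
The imported perception.Perception is a custom module rather than a torch built-in. A sketch consistent with the Perception(2, 3, 2) constructor, assuming a simple two-layer MLP with sigmoid activations:

import torch
from torch import nn

class Perception(nn.Module):
    def __init__(self, in_dim, hid_dim, out_dim):
        super(Perception, self).__init__()
        # two fully connected layers: in_dim -> hid_dim -> out_dim
        self.layer = nn.Sequential(
            nn.Linear(in_dim, hid_dim),
            nn.Sigmoid(),
            nn.Linear(hid_dim, out_dim),
            nn.Sigmoid(),
        )

    def forward(self, x):
        return self.layer(x)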
Example #20
# -*- coding: utf-8 -*-
"""
@File    : test02.py
@Time    : 12/9/20 7:56 PM
@Author  : Mingqiang Ning
@Email   : [email protected]
@Modify Time        @Version    @Description
------------        --------    -----------
12/9/20 7:56 PM      1.0         None
# @Software: PyCharm
"""
import torch
from perception import Perception
net = Perception(2, 3, 2)
# print(net)
# for name, parameter in net.named_parameters():
#     print(name, parameter)
data = torch.randn(4, 2)
# print(data)
out = net(data)
print(out)

Example #21
    def __init__(self, env):
        super(SafetyEnvelope, self).__init__(env)

        # Grab configuration
        self.config = cg.Configuration.grab()

        # Action proposed by the agent
        self.proposed_action = None

        # Action proposed by the monitor
        self.shaped_action = None

        # List of all monitors with their states, rewards and unsafe-actions
        self.meta_monitor = []

        # Dictionary that gets populated with information by all the monitors at runtime
        self.monitor_states = {}

        # Perception of the agent; updated at each step with the current observations
        self.perception = Perception(env.gen_obs_decoded())

        # Set rewards
        self.step_reward = self.config.rewards.standard.step
        self.goal_reward = self.config.rewards.standard.goal
        self.death_reward = self.config.rewards.standard.death

        # Dictionary that contains all the type of monitors you can use
        dict_monitors = {
            'precedence': Precedence,
            'response': Response,
            'universality': Universality,
            'absence': Absence
        }

        for monitor_types in self.config.monitors:
            for monitors in monitor_types:
                for monitor in monitors:
                    # Only register active monitors that define conditions;
                    # previously new_monitor could be referenced before assignment.
                    if not monitor.active or not hasattr(monitor, 'conditions'):
                        continue
                    new_monitor = dict_monitors[monitor.type](
                        monitor.type + "_" + monitor.name,
                        monitor.conditions, self._on_monitoring,
                        monitor.rewards, self.perception,
                        monitor.context)
                    self.meta_monitor.append(new_monitor)
                    self.monitor_states[new_monitor.name] = {
                        "state": "",
                        "shaped_reward": 0,
                        "unsafe_action": "",
                        "mode": monitor.mode,
                        "action_planner": getattr(monitor, 'action_planner',
                                                  "wait"),
                    }

        print("Active monitors:")
        for monitor in self.meta_monitor:
            print(monitor)
        self._reset_monitors()
Example #22
    def setUp(self):
        self.config = Mock()
        self.config.field_elements = [Polygon(make_square_vertices(side_length=1, center=(0, 0)))]
        self.config.outer_wall = Polygon(make_square_vertices(side_length=2, center=(0, 0)))
        self.perception = Perception(self.config)
Example #23
"""
script for the final project logic main loop
"""

import sys
import numpy as np
sys.path.append('/home/pi/ArmPi/')
from paw_class import Paw
from perception import Perception, show_image, label_scene
from ArmIK.Transform import convertCoordinate
import db_txt as db

if __name__ == '__main__':
    count = 0

    perception_obj = Perception()
    paw_obj = Paw()
    final_pos = (-15 + 0.5, 12 - 0.5, 1.5)  # x block home
    img_size = (640, 480)

    while True:
        # perception code
        count += 1
        #perception_obj.get_frame()
        scene = perception_obj.get_all_targets()
        img = perception_obj.frame
        img = label_scene(img, scene)
        if img is not None:
            print(scene)
            show_image(img)