def __init__(self):  # type: () -> None
    self._robot_state = RobotState()
    self._robot_control = RobotControl()
    self._goal_pool = GoalPool()
    self._grid = Grid()
    self._goal_selector = GoalSelector()
    self._path_finder = PathFinder()
    self._marker_drawer = MarkerDrawer()
    self._current_goal = None
Example 2
def main(graph, gazebo, graph_width, graph_height):
    if gazebo:
        rc = RobotControl()

    # Create a shrink machine object
    shrink = ShrinkMachine()

    # When start method is called, master is created
    shrink.start()
    states_master, states_slave1, states_slave2 = shrink.get_defined_states()
    edges_master, edges_slave1, edges_slave2 = shrink.get_defined_transitions()

    if graph:
        graph = MachineGraph((states_master, states_slave1, states_slave2),
                             (edges_master, edges_slave1, edges_slave2),
                             graph_width, graph_height)

    print("Initial machine: " + str(shrink.get_current_machine()))
    print("Initial state: " + str(shrink.get_current_state()))

    while True:
        possible_trans = shrink.get_possible_transitions()
        print("INFO: if the state doesn't change, just press Enter :)")
        print("Possible transitions: ")
        #user_tran = input(f"Expected trans is{shrink.get_possible_transitions()}: ")
        for i in range(len(possible_trans)):
            print('\t' + str(i) + ": " + str(possible_trans[i]))
        user_tran = None

        if graph:
            graph.display(shrink)

        try:
            user_id = int(input("User transition (type a number only): "))
            if 0 <= user_id < len(possible_trans):
                user_tran = possible_trans[user_id]
            else:
                print(colored('Transition index out of range', 'red'))
                break
        except ValueError:
            print("That wasn't a number\n")
            break

        if user_tran is not None:
            shrink.run_transition([user_tran])
        print("Current machine: " + str(shrink.get_current_machine()))
        print("Current state: " + str(shrink.get_current_state()))

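        # Inspect the currently defined states and transitions (not used further in this loop)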
        states = shrink.get_defined_states()
        trans = shrink.get_defined_transitions()
        s = states[0][0].transitions

        if gazebo:
            rc.move_robot(shrink.get_current_state().value)

    shrink.stop()
Example 3
    def __init__(self, ip):
        rospy.init_node('robot_toolkit_node', anonymous=True)

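        # Core robot control interface: create it and subscribe to its ROS topics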
        self._robotControl = RobotControl(ip)
        self._robotControl.subscribeTopics()

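        # Interaction features: speakers, text-to-speech and LEDs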
        self._robotInteraction = RobotInteraction(ip)
        self._robotInteraction.initSpeakers()
        self._robotInteraction.robotSpeakers.subscribeTopics()

        self._robotInteraction.initT2S()
        self._robotInteraction.robotT2S.subscribeTopics()

        self._robotInteraction.initLEDs()
        self._robotInteraction.robotLEDs.subscribeTopics()

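        # Tablet management interface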
        self._robotTabletManagement = RobotTabletManagement(ip)
    def __init__(self):

        RobotControl.__init__(self)  # Initialize RobotControl

        self.action_space = Discrete(['i', 'j', 'l', 'p'])
        self.observation_space = Box('iRobot/camera/image_raw')

        rospy.Subscriber('/irobot_bumper',
                         ContactsState,
                         callback=self._bumper_callback)

        # Count steps so we know when the game is done (don't play forever)
        self.nb_steps = 500
        self.is_bumper_triggered = False
        self.step_counter = self.nb_steps

        # Step counter
        self._step_counter = self.nb_steps
def main():  # type: () -> None
    """ Main function for the manual robot control. """
    rospy.init_node("manual_robot_control")

    robot_control = RobotControl()

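    # Read WASD-style keyboard input and translate it into movement and steering commands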
    while True:
        action = raw_input()

        movement = Movement.STANDING
        steering = Steering.STRAIGHT

        if 'w' in action and 's' not in action:
            movement = Movement.FORWARD
        elif 's' in action and 'w' not in action:
            movement = Movement.BACKWARD

        if 'a' in action and 'd' not in action:
            steering = Steering.LEFT
        elif 'd' in action and 'a' not in action:
            steering = Steering.RIGHT

        robot_control.move(movement, steering)
Example 6
def main(video_src=2):
    # Initiate Face Tracker
    # Todo 190207
    # Create the folder at the os.path location if it does not exist
    face_tracker = FaceTracker(video_device_id=int(video_src),
                               enable_age_gender=True,
                               age_gender_model_path=os.path.join(
                                   'pretrained_models', 'age_gender',
                                   'weights-wkfd.hdf5').replace("\\", "/"),
                               age_type="min")

    # Initiate some variables
    _var = None

    if sys.argv[2] == "0":
        robot_ip = None
        client_socket = None
    else:
        robot_ip = "192.168.0.53"
        print("Connecting to robot", robot_ip)
        client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client_socket.bind(("0.0.0.0", 0))
        client_socket.connect((robot_ip, 8250))
        print("Connected to the robot.")

    robot_control = RobotControl(robot_ip, client_socket)
    robot_face = '05'
    target_face_index = None
    target_face_id_in_db = None

    if sys.argv[3] == "1":
        _update_flag = True
    else:
        _update_flag = False
    social_relation_estimator = SocialRelationEstimator(
        robot_control, update_flag=_update_flag, enable_speaker=True)

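    # Main loop: grab a frame, run the face tracker, pick a target face and drive the robot accordingly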
    while True:
        s_time = time.time()
        # Grab a single frame of video
        ret, frame = face_tracker.video_capture.read()
        process_frame = face_tracker.run(frame)
        move_flag = None

        if process_frame:
            # Select largest face which is the closest one from the camera
            # Todo 190131
            # Kalman filter: if the target ID is picked up again within a certain (short) window, keep tracking it even if the face size changes -> embedding group
            if target_face_id_in_db is None and len(
                    face_tracker.index_in_known_data) > 0 and len(
                        face_tracker.index_in_known_data) == len(
                            face_tracker.face_locations):
                # Find the index of the largest face based on the current face locations.
                target_face_index = face_tracker.select_largest_face()  # based on location

                # This value is the ID in the DB -> a unique value
                target_face_id_in_db = face_tracker.index_in_known_data[
                    target_face_index]  # based on the tracker's data

                target_det_time = time.time()

            if target_face_id_in_db is not None:
                '''
                1. If there is no target, pick the largest face. # face_tracker.select_largest_face()
                2. Even if the target is not the largest face, keep following it if it was already being tracked.
                3. If the target disappears, keep it for (5) seconds and slow down in the direction it was heading.
                '''
                random_utter_flag = robot_control.random_utterance.flag
                if random_utter_flag == 2:
                    # A random utterance is currently playing
                    move_flag = 2
                else:
                    if len(face_tracker.face_locations) > 0:
                        # print(target_face_index, face_tracker.index_in_known_data)
                        if len(face_tracker.index_in_known_data) > 0 and len(
                                face_tracker.index_in_known_data) == len(
                                    face_tracker.face_locations):
                            try:
                                # Find where the currently targeted face ID sits among the faces found in this frame.
                                target_face_index = face_tracker.index_in_known_data.index(
                                    target_face_id_in_db)

                                # Name of the current target
                                target_face_id = face_tracker.known_face_names[
                                    target_face_id_in_db].split(
                                        "ID:")[1].split(",")[0]
                                print('Target Face Name Id:', target_face_id)

                                # Map the DB ID values onto the order of the currently visible faces
                                # visible_face_index_in_db and face_tracker.index_in_known_data seem to be the same..? Todo 190208 (done) - they are the same
                                # visible_face_index_in_db = [face_tracker.index_in_known_data[i] for i, _ in enumerate(face_tracker.face_locations)]

                                # print(visible_face_index_in_db)
                                if target_face_id_in_db in face_tracker.index_in_known_data:
                                    # The existing target is among the currently visible faces.
                                    '''
                                    move_flag
                                    0: the target is still visible
                                    1: the target is not visible -> Slow Down
                                    2: the target is not visible and there is no new target -> Stop
                                    '''
                                    move_flag = 0
                                    target_face_location = face_tracker.face_locations[
                                        target_face_index]
                                    target_det_time = time.time()
                                else:
                                    # The target is not currently visible.
                                    move_flag = 1
                            except (ValueError, IndexError):
                                # print("target_face_id_in_db", target_face_id_in_db, face_tracker.index_in_known_data)
                                target_face_id = face_tracker.known_face_names[
                                    target_face_id_in_db].split(
                                        "ID:")[1].split(",")[0]
                                # print('Target Face Name Id:',target_face_id)

                                for _vi, _visible_index in enumerate(
                                        face_tracker.index_in_known_data):
                                    visible_face_id = face_tracker.known_face_names[
                                        _visible_index].split("ID:")[1].split(
                                            ",")[0]

                                    if visible_face_id == target_face_id:
                                        # print('Visible Face Name Id:',visible_face_id)
                                        move_flag = 0
                                        target_face_location = face_tracker.face_locations[
                                            _vi]

                                # print(face_tracker.known_face_groups)

                                if move_flag != 0:
                                    # The target disappeared and a new face appeared
                                    if time.time() - target_det_time > 10:
                                        move_flag = 1
                                        target_face_index = None
                                        target_face_id_in_db = None
                                    else:
                                        move_flag = 1

                        elif time.time() - target_det_time > 10:
                            move_flag = 2
                            target_face_index = None
                            target_face_id_in_db = None
                        else:
                            print("Detecting..")
                            move_flag = 1

                    elif time.time() - target_det_time > 5 and len(
                            face_tracker.face_locations) == 0:
                        # When the target has not been visible for more than 5 seconds
                        move_flag = 2
                        target_face_index = None
                        target_face_id_in_db = None
                    else:
                        move_flag = 1
                print("Move flag", move_flag)

                try:
                    target_name = face_tracker.known_face_names[
                        target_face_id_in_db]
                except IndexError:
                    target_name = None

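                # move_flag 0: the target is visible, so pass its location to the controller; otherwise the controller slows down or stops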
                if move_flag == 0:
                    # The actual robot part
                    # print(target_face_location, type(target_face_location))
                    if face_tracker.center_location is not None:
                        print("Two people detected")
                        target_face_location = face_tracker.center_location

                    _var = robot_control.run(_var, robot_face, target_name,
                                             target_face_location, frame,
                                             move_flag,
                                             social_relation_estimator)
                else:
                    # The actual robot part
                    _var = robot_control.run(_var, robot_face, target_name,
                                             None, frame, move_flag,
                                             social_relation_estimator)

                # Relation estimation part
                # print(robot_control.status)
                if robot_control.status == 0 and len(
                        face_tracker.known_face_ages) == len(
                            face_tracker.known_face_names):
                    # Select when the distance is below a threshold and the difference in detected face areas is below a certain size

                    relevant_face_index = face_tracker.get_relevant_faces(
                        target_face_index)
                    if len(relevant_face_index) == 2:
                        face_tracker.center_location = face_tracker.get_center_location(
                            relevant_face_index)
                        social_relation_estimator.couple_not_cnt = 0
                    elif social_relation_estimator.couple_not_cnt is not None:
                        social_relation_estimator.couple_not_cnt += 1

                        if social_relation_estimator.couple_not_cnt > 15:
                            face_tracker.center_location = None
                            social_relation_estimator.couple_not_cnt = None

                    ages = [
                        face_tracker.known_face_ages[
                            face_tracker.index_in_known_data[i]]
                        for i in relevant_face_index
                    ]
                    genders = [
                        face_tracker.known_face_genders[
                            face_tracker.index_in_known_data[i]]
                        for i in relevant_face_index
                    ]
                    names = [
                        face_tracker.known_face_names[
                            face_tracker.index_in_known_data[i]]
                        for i in relevant_face_index
                    ]
                    emotions = [
                        face_tracker.known_face_emotions[
                            face_tracker.index_in_known_data[i]]
                        for i in relevant_face_index
                    ]
                    emotion_probs = [
                        face_tracker.known_face_emotion_probs[
                            face_tracker.index_in_known_data[i]]
                        for i in relevant_face_index
                    ]
                    detect_cnts = [
                        face_tracker.known_face_detect_count[
                            face_tracker.index_in_known_data[i]]
                        for i in relevant_face_index
                    ]
                    # print(detect_cnts)

                    # Todo 190209
                    # Only run when the face is above a certain size.

                    social_relation_estimator.run(detect_cnts, ages, genders,
                                                  emotions, emotion_probs,
                                                  target_face_id)
                    # print(min(detect_cnts))
                # if done:
                # Release handle to the webcam
                # face_tracker.video_capture.release()
                # except Exception as ex:
                #     print("ex at main loop:",ex)
            else:
                _var = robot_control.run(_var, robot_face, None, None, frame,
                                         2, social_relation_estimator)
Example 7
plt = pyplot.imshow(currentImage)
pyplot.tight_layout()
pyplot.ion()
pyplot.show()

detector = {
    DetectorType.cv: CVDetector(),
    DetectorType.tflite: TFLiteDetector(args.classes_of_interest, args.score_threshold),
    DetectorType.edgetpu: EdgeTpuDetector(args.classes_of_interest, args.score_threshold)
}[args.detector]

detector.setup()

robotControl = None
if args.robot_control:
    robotControl = RobotControl(args.robot_mac)

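# Detection loop: run the detector on each frame while the capture thread is alive;
# boxes are sorted by surface so the largest detection comes first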
try:
    while thr.is_alive():
        boundingBoxes = detector.detect(currentImage)
        boundingBoxes = sorted(boundingBoxes, key=methodcaller("surface"), reverse=True)

        if robotControl and not boundingBoxes:
            robotControl.stop()

        for i, boundingBox in enumerate(boundingBoxes):
            if i == 0:
                color = (0, 255, 0,)
            else:
                color = (255, 0, 0,)
            cv2.rectangle(currentImage,
Example 8
from flask import Flask
from flask import render_template
from flask import request
import subprocess
import sys
from robot_control import RobotControl
from werkzeug.utils import secure_filename
import os
from robot_detection import RobotDetection
import time

cam = 0
cam_proc = 0

rc = RobotControl()
rd = RobotDetection(rc)

app = Flask(__name__)

# Create a directory in a known location to save files to.
uploads_dir = 'Photos'


@app.route('/')
def home():
    return render_template('index.html')


@app.route('/control')
def control():
Example 9
from time import sleep
import logging

from robot_control import RobotControl

logging.basicConfig(level=logging.INFO)

rbtCtrl = RobotControl()

sleep(1)

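# Drive left, right, up and down for one second each, stopping between moves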
rbtCtrl.moveLeft()
sleep(1)
rbtCtrl.stop()

rbtCtrl.moveRight()
sleep(1)
rbtCtrl.stop()

rbtCtrl.moveUp()
sleep(1)
rbtCtrl.stop()

rbtCtrl.moveDown()
sleep(1)
rbtCtrl.stop()

del rbtCtrl
class RobotNavigation:
    def __init__(self):  # type: () -> None
        self._robot_state = RobotState()
        self._robot_control = RobotControl()
        self._goal_pool = GoalPool()
        self._grid = Grid()
        self._goal_selector = GoalSelector()
        self._path_finder = PathFinder()
        self._marker_drawer = MarkerDrawer()
        self._current_goal = None

    def update(self):  # type: () -> None
        """ Updates the navigation of the robot. """
        if not self._robot_state.received_all_data():
            return

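        # Update the grid from the latest robot state and draw the detected obstacles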
        self._grid.update(self._robot_state)
        self._marker_drawer.draw_obstacles(self._grid.obstacles, self._robot_state)

        self._goal_pool.check_goals(self._robot_state)

        new_goal = self._goal_selector.select_goal(self._goal_pool.get_uncollected_goals(), self._grid,
                                                   self._robot_state)
        if new_goal is not self._current_goal and new_goal is not None:
            rospy.loginfo("Target: (%s %s), reward=%s" % (new_goal.x, new_goal.y, new_goal.reward))
        self._current_goal = new_goal

        self._marker_drawer.draw_goals(self._current_goal, self._goal_pool.goals, self._robot_state)

        # stop robot if no goal is selected
        if self._current_goal is None:
            self._robot_control.stop()
            return

        goal_grid_position = self._grid.nearby_free_grid_position((self._current_goal.x, self._current_goal.y),
                                                                  GOAL_RADIUS_SQRT)
        # set goal as unreachable if no grid position found
        if goal_grid_position is None:
            self._current_goal.unreachable = True
            self._robot_control.stop()
            return

        # find the path to the goal
        path = self._path_finder.find_path(self._grid.obstacles, self._robot_state.proximal_position,
                                           goal_grid_position)

        # check if path was found
        if len(path) <= 1:
            self._current_goal.unreachable = True
            self._robot_control.stop()
            return

        self._marker_drawer.draw_path(path, self._robot_state)

        # get furthest away target position which is still in sight
        target_position = self._grid.first_in_sight(path[:0:-1], self._robot_state.proximal_position)
        if target_position is None:
            target_position = path[1]

        # check if target position is in obstacle
        if target_position in self._grid.obstacles:
            self._robot_control.stop()
            return

        self._robot_control.navigate(target_position, self._robot_state)
        self._marker_drawer.draw_direction(target_position, self._robot_state)