Example no. 1
    def stream_yolov3_personal(self):
        net = ObjectDetection(conf_thresh=0.8, nms_thresh=0.4)
        while True:
            begin = process_time()
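            # pull the current frame from the HTTP stream and convert it to RGB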
            frame = Image.open(requests.get(self.url,
                                            stream=True).raw).convert('RGB')
            image, scale, padding = SingleImage(frame)[0]
            image = torch.unsqueeze(image, 0)
            detections = net.detect(image, scale, padding)
            image_with_detections = net.draw_result(frame,
                                                    detections[0],
                                                    show=False)
            opencvImage = cv2.cvtColor(np.array(image_with_detections),
                                       cv2.COLOR_RGB2BGR)
            end = process_time()
            print(end - begin)
            # show the frame to our screen
            cv2.imshow("Frame", opencvImage)
            key = cv2.waitKey(1) & 0xFF
            # if the 'q' key is pressed, stop the loop
            if key == ord("q"):
                break

        # close all windows
        cv2.destroyAllWindows()
Example no. 2
 def do_action(self):
     print("Searching")
     rospy.Subscriber('/mavros/local_position/pose', PoseStamped,
                      self._local_pose_callback)
     ObjectDetection.start_object_detection()
     self.result = ObjectDetection.detect_object()
     print("search-drone-pos: " +
           str(ObjectDetection.get_detection_position()))
     obj = ObjectDetection.get_detection_position()
     if obj.x != 0 and obj.y != 0 and obj.z != 0:
         self.found = True
     print("-------------")
Example no. 3
def start_object_detection(video_id):
    response.content_type = "application/json; charset=UTF-8"

    object_detection = ObjectDetection(video_id)

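    # run detection in a background thread so the request can return immediately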
    thread = threading.Thread(target=object_detection.detect)

    thread.start()

    data = {"video_id": video_id}
    _links = {
        "self": {
            "href":
            cv_api_url(
                format_route(START_OBJECT_DETECTION_ROUTE,
                             {"video_id": video_id}))
        },
        "cancel": {
            "href":
            cv_api_url(
                format_route(CANCEL_OBJECT_DETECTION_ROUTE,
                             {"video_id": video_id}))
        }
    }
    return _ok_res(data=data, _links=_links)
Example no. 4
    def m_parser(self, model_dir):

        with open(model_dir + "/cvexport.manifest") as m_file:
            data = json.load(m_file)

        # cvexport manifest parameters
        self.domain_type = str(data["DomainType"])
        print("Domain Type:", self.domain_type)

        # default anchors
        if str(self.domain_type) == "ObjectDetection":
            objdet = ObjectDetection(data, model_dir, None)
            ret = self.model_inference(objdet, iot_hub_manager, 1)
        elif str(self.domain_type) == "Classification":
            imgcls = ImageClassification(data, model_dir)
            ret = self.model_inference(imgcls, iot_hub_manager, 0)
        else:
            print(
                "Error: No matching DomainType: Object Detection/Image Classificaiton \n"
            )
            print("Exiting.....!!!! \n")
            sys.exit(0)
        if ret == 1:
            print("App finished running Inference...Exiting...!!!")
            sys.exit(1)
Example no. 5
    def load_model(self, model_dir, is_default_model, is_scenario_model):
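        # three cases: the default cvexport model, a scenario ONNX model in
        # model_dir, or the ONNX model shipped under model/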
        if is_default_model:
            print('[INFO] Loading Default Model ...')

            model = None

            with open(model_dir + str('/cvexport.manifest')) as f:
                data = json.load(f)

            # FIXME to check whether we need to close the previous session
            if data['DomainType'] == 'ObjectDetection':
                model = ObjectDetection(data, model_dir, None)
                return model

        elif is_scenario_model:
            print('[INFO] Loading Scenario Model ...')
            with open(model_dir + '/labels.txt', 'r') as f:
                labels = [l.strip() for l in f.readlines()]
            model = ONNXRuntimeObjectDetection(model_dir + '/model.onnx',
                                               labels)

            return model

        else:
            print('[INFO] Loading Model ...')
            with open('model/labels.txt', 'r') as f:
                labels = [l.strip() for l in f.readlines()]
            model = ONNXRuntimeObjectDetection('model/model.onnx', labels)

            return model

        return None
Example no. 6
def main():
    debug_mode = rospy.get_param(NODE_NAME + "/debug", DEFAULT_DEBUG_MODE)
    if debug_mode:
        log_level = rospy.DEBUG
        rospy.loginfo("[%s] Launching object detection node in debug mode",
                      NODE_NAME)
    else:
        log_level = rospy.INFO

    rospy.init_node(NODE_NAME, anonymous=True, log_level=log_level)
    update_rate = rospy.get_param('~update_rate', DEFAULT_UPDATE_RATE)
    playback = rospy.get_param("playback", DEFAULT_PLAYBACK)

    state_machine_input = StateMachineInput(NODE_NAME)
    object_detection = ObjectDetection(NODE_NAME, playback=playback)
    object_detection_state_machine = ObjectDetectionStateMachine(
        object_detection, state_machine_input, update_rate, NODE_NAME)

    rospy.loginfo('[%s] Object detection state machine successfully generated',
                  NODE_NAME)

    rospy.core.add_preshutdown_hook(
        lambda reason: object_detection_state_machine.request_shutdown())

    object_detection_state_machine.run()
Example no. 7
    def load_model(self, model_dir, is_default_model, is_scenario_model):
        if is_default_model:
            print("[INFO] Loading Default Model ...")

            model = None

            with open(model_dir + str("/cvexport.manifest")) as f:
                data = json.load(f)

            # FIXME to check whether we need to close the previous session
            if data["DomainType"] == "ObjectDetection":
                model = ObjectDetection(data, model_dir, None)
                return model

        elif is_scenario_model:
            print("[INFO] Loading Default Model ...")
            with open(model_dir + "/labels.txt", "r") as f:
                labels = [l.strip() for l in f.readlines()]
            model = ONNXRuntimeObjectDetection(model_dir + "/model.onnx",
                                               labels)

            return model

        else:
            logger.info("Load Model ...")
            with open("model/labels.txt", "r") as f:
                labels = [l.strip() for l in f.readlines()]
            model = ONNXRuntimeObjectDetection("model/model.onnx", labels)
            logger.info("Load Model, success")

            return model

        return None
Example no. 8
def test_object_detection(test_logger):
    od = ObjectDetection(model_config=default_model_config, logger=test_logger)
    tf = get_test_file()

    result = od.process_image_file(tf.name)
    tf.close()
    summary = {}
    for object_name, rating in result:
        if object_name not in summary:
            summary[object_name] = 0
        summary[object_name] += 1

    expected_summary = {'person': 8, 'chair': 8, 'tvmonitor': 1}
    if summary != expected_summary:
        test_logger.error("Summary doesn't match expected result")
        test_logger.error("Found: %s" % summary)
        test_logger.error("Expected: %s" % expected_summary)
        sys.exit(1)
Example no. 9
    def __init__(self):
        """Creates all the modules"""
        print("class: Controller created.")

        self.stances = ["default", "arm", "build", "dance", "battle_stance", ""]
        self.stance = "default"

        self.args = "|"
        self.using_joysticks = False
        self.speed1_x = 0
        self.speed1_y = 0
        self.speed2_x = 0
        self.speed2_y = 0
        self.arm = Arm()
        self.builder = Builder()
        self.dancing = Dancing()
        self.direction = ObjectDetection()
        self.movement = Movement()
        self.sound = SoundRecognition()
        self.reset = False
        self.drive = False

        # initialize bluetooth_connection connection
        self.host = connect.Connect(1, "host", 'B8:27:EB:36:3E:F8', 4, self)
        self.host.start()
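        # give the host connection time to establish before starting the client sender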
        time.sleep(20)
        self.client = send.Send(2, "client", 'B8:27:EB:DE:5F:36', 5)
        self.client.start()
        time.sleep(10)

        self.msg_received = False
        # Add controller as observer to classes
        self.arm.addObserver(self)
        self.builder.addObserver(self)
        self.dancing.addObserver(self)
        self.direction.addObserver(self)
        self.movement.addObserver(self)
        self.sound.addObserver(self)

        self.robot = robot.Robot(6, 3)

        self.list = [self.arm, self.builder, self.dancing, self.direction, self.movement, self.sound]
Example no. 10
    def __init__(self, robot_client, grid_size, slope_pos, param, button_pin):

        self.__client = robot_client
        self.__params = param

        self.__objects = []
        self.__grid = None
        self.__grid_size = grid_size

        self.__workspace_name = ""
        self.__observation_pose = None
        self.__global_z_offset = 0.

        self.init_from_yaml()

        self.__object_detector = ObjectDetection(robot_client, self.__observation_pose, self.__workspace_name,
                                                 self.__global_z_offset,
                                                 grid_size)
        self.__robot = RobotPlayer(robot_client, self.__object_detector, self.__grid_size, slope_pos)
        self.__button_pin = button_pin
Example no. 11
    def load_model(self, model_dir):
        print('[INFO] Loading Model ...')

        model = None

        with open(model_dir + str('/cvexport.manifest')) as f:
            data = json.load(f)

        # FIXME to check whether we need to close the previous session
        if data['DomainType'] == 'ObjectDetection':
            model = ObjectDetection(data, model_dir, None)
            return model

        return None
Example no. 12
 def play_turn(self):
     turn_detection = ObjectDetection(AdbCom().get_screenshot(), self.level,
                                      self.debug)
     player_pos = turn_detection.find_player()
     if self.simple:
         # y value should be enough since the jump angle is either 45 or 135 degrees.
         obj_y = turn_detection.next_block_y(*player_pos)
         y_diff = (obj_y - player_pos[1])
         log.info("Platform y distance: %s" % y_diff)
         press_time = int(sqrt(2 * y_diff * y_diff) *
                          self.pressure_unit) + self.pressure_padding
     else:
         # Computation using next platform full position
         objective_pos = turn_detection.next_block_pos(*player_pos)
         x_diff = (objective_pos[0] - player_pos[0])
         y_diff = (objective_pos[1] - player_pos[1])
         distance = sqrt(x_diff * x_diff + y_diff * y_diff)
         log.info("Platform distance: %s" % distance)
         press_time = int(
             distance * self.pressure_unit) + self.pressure_padding
     log.info("Asking move: press time %s " % press_time)
     AdbCom().send_keypress(press_time)
     self.level += 1
Example no. 13
def obj_detection(objX, objY, centerX, centerY):
    # signal trap to handle keyboard interrupt
    signal.signal(signal.SIGINT, signal_handler)

    # camera = VideoStream(usePiCamera=True).start()
    camera = cv2.VideoCapture(0)
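    # give the camera sensor time to warm up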
    time.sleep(2.0)

    # Loading model
    obj = ObjectDetection()
    # loop indefinitely
    while True:
        # grab the frame from the threaded video stream and flip it
        # vertically (since our camera was upside down)
        grabbed, frame = camera.read()
        # frame = cv2.flip(frame, 0)
        frame = cv2.resize(frame, (300, 300))
        if not grabbed:
            break

        # calculate the center of the frame as this is where we will
        # try to keep the object
        (H, W) = frame.shape[:2]
        centerX.value = W // 2
        centerY.value = H // 2
        # find the object's location
        objectLoc = obj.object_detection(frame, (centerX.value, centerY.value))
        ((objX.value, objY.value), rect) = objectLoc
        print("obj (X, Y):", objX.value, objY.value)
        # extract the bounding box and draw it
        if rect is not None:
            (x, y, w, h) = rect
            cv2.rectangle(frame, (int(x), int(y)), (int(w), int(h)),
                          (0, 255, 0), 2)
        # display the frame to the screen
        cv2.imshow("Person Tracking", frame)
        cv2.waitKey(1)
Example no. 14
def track_one_scene(detection_file: str):
    np.random.seed(0)
    # initialize filter
    config = FilterConfig(state_dim=6, measurement_dim=3)
    density = get_gaussian_density_NuScenes_CV()
    pmbm_filter = PoissonMultiBernoulliMixture(config, density)

    # get measurements
    with open(detection_file, 'r') as f:
        data = json.load(f)
    all_measurements = data['all_measurements']
    all_classes = data['all_classes']
    all_object_detections = {}
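    # wrap each raw measurement into an ObjectDetection (position, size, height, score, class)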
    for time_step in all_classes.keys():
        all_object_detections[time_step] = [
            ObjectDetection(z=np.array(measurement[:3]).reshape(3, 1),
                            obj_type=obj_type,
                            size=measurement[3:6],
                            height=measurement[6],
                            score=measurement[7],
                            empty_constructor=False) for measurement, obj_type
            in zip(all_measurements[time_step], all_classes[time_step])
        ]
    all_estimation = {}
    num_frames = len(all_object_detections.keys())
    for i_frame in range(num_frames):
        measurements = all_object_detections[str(i_frame)]
        # for meas in measurements:
        #     print(meas)
        # break

        print('Time step {}'.format(i_frame))
        all_estimation[i_frame] = pmbm_filter.run(measurements)

        print('After Update\n', pmbm_filter)
        print('\n-----------------------------\n')

    with open(
            './estimation-result/estimation-scene-0757-{}.json'.format(
                datetime.now().strftime("%Y%m%d-%H%M%S")), 'w') as outfile:
        json.dump(all_estimation, outfile)
Example no. 15
from flask import Flask
from flask import request
from flask import jsonify

from object_detection import ObjectDetection

# Instantiate the object detector
object_detector = ObjectDetection()

# Instantiate Flask
app = Flask(__name__)


@app.route('/detect_objects', methods=['GET', 'OPTIONS'])
def detect_objects():
    json_args = request.args
    json_resp = {
        'success': None,
        'error': None,
        'labels': None,
    }
    if 'url' not in json_args:
        # User has not provided a valid request
        # return an informative error message and status code 400
        json_resp.update({
            'success': False,
            'error': 'No parameter `url` supplied in request.'
        })
        status_code = 400
    else:
        # User has supplied a valid `url` parameter, but we still need to validate its content
Example no. 16
    [0, 1, 0, 0],
])
R = np.array([[100, 0], [0, 100]])

density = GaussianDensity(state_dim, meas_dim, F, Q, H, R, gating_size)

poisson = PointPoissonProcess(state_dim, meas_dim, prob_survival,
                              prob_detection, density, poisson_birth_weight,
                              poisson_prune_threshold, poisson_merge_threshold)

# get measurement
data = np.loadtxt('test_gating_measurements.txt')
measurements = [data[:, i].reshape(2, 1) for i in range(data.shape[1])]
z = measurements[0]
z = np.array([z[0, 0], z[1, 0], 0]).reshape(-1, 1)
detection = ObjectDetection(z, 'dummy', False)

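# run the predict and update steps of the Poisson point process,
# printing the intensity components after each step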
poisson.predict([detection])
for component in poisson.intensity:
    print('weight:{}\tstate:{}'.format(component['w'], component['s']))

poisson.update()
for component in poisson.intensity:
    print('weight:{}\tstate:{}'.format(component['w'], component['s']))

print('Number of components before pruning {}'.format(len(poisson.intensity)))
poisson.intensity[-1]['w'] = 1
poisson.prune()
for component in poisson.intensity:
    print('weight:{}\tstate:{}'.format(component['w'], component['s']))
print('Number of components after pruning {}'.format(len(poisson.intensity)))
Example no. 17
from flask import Flask, render_template, Response
from camera import CameraStream
import cv2
from object_detection import ObjectDetection

app = Flask(__name__)

cap = CameraStream().start()

obj = ObjectDetection()


@app.route('/')
def index():
    """Video streaming home page."""
    return render_template('index.html')


def gen_frame():
    """Video streaming generator function."""
    while cap:
        frame = cap.read()
        frame = obj.predict(frame)
        convert = cv2.imencode('.jpg', frame)[1].tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + convert + b'\r\n'
               )  # concatenate frames one by one and show the result


@app.route('/video_feed')
def video_feed():
    # stream the annotated frames as a multipart MJPEG response
    return Response(gen_frame(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
Example no. 18
def main():
    # load NuScenes
    data_root = '/home/mqdao/Downloads/nuScene/v1.0-test'
    version = 'v1.0-test'
    nusc = NuScenes(version=version, dataroot=data_root, verbose=True)

    # load scene token
    with open('test_scene_tokens_centerPoint.json', 'r') as infile:
        val_scene_tokens = json.load(infile)

    # init tracking results for the whole val set
    tracking_results = {}

    for _, scene_token in tqdm(val_scene_tokens.items()):
        # get measurements
        with open('./centerPoint-per-scene-detection/test/{}.json'.format(scene_token), 'r') as f:
            data = json.load(f)
        all_measurements = data['all_measurements']
        all_classes = data['all_classes']
        all_object_detections = {}
        for time_step in all_classes.keys():
            all_object_detections[time_step] = [ObjectDetection(z=np.array(measurement[:3]).reshape(3, 1),
                                                                obj_type=obj_type,
                                                                size=measurement[3:6],
                                                                height=measurement[6],
                                                                score=measurement[7],
                                                                empty_constructor=False)
                                                for measurement, obj_type in
                                                zip(all_measurements[time_step], all_classes[time_step])]
        # initialize filter
        config = FilterConfig(state_dim=6, measurement_dim=3)
        density = get_gaussian_density_NuScenes_CV()
        pmbm_filter = PoissonMultiBernoulliMixture(config, density)

        current_sample_token = nusc.get('scene', scene_token)['first_sample_token']
        current_time_step = 0

        while current_sample_token != '':
            # initialize tracking results for this sample
            tracking_results[current_sample_token] = []
            # invoke filter and extract estimation
            measurements = all_object_detections[str(current_time_step)]
            if len(measurements) > 0:
                estimation = pmbm_filter.run(measurements)
                # log estimation
                for target_id, target_est in estimation.items():
                    sample_result = format_result(current_sample_token,
                                                  target_est['translation'] + [target_est['height']],
                                                  target_est['size'],
                                                  target_est['orientation'],
                                                  target_est['velocity'],
                                                  target_id,
                                                  target_est['class'],
                                                  target_est['score'])
                    tracking_results[current_sample_token].append(sample_result)
            # move on
            current_sample_token = nusc.get('sample', current_sample_token)['next']
            current_time_step += 1

    # save tracking result
    meta = {'use_camera': False, 'use_lidar': True, 'use_radar': False, 'use_map': False, 'use_external': False}
    output_data = {'meta': meta, 'results': tracking_results}
    with open('./estimation-result/all-results-validataion-set-{}.json'.format(datetime.now().strftime("%Y%m%d-%H%M%S")), 'w') as outfile:
        json.dump(output_data, outfile)
Example no. 19
    return show_frame


if __name__ == '__main__':
    print("打开视频")
    video = "./video/3.mp4"
    cap = cv.VideoCapture(video)

    print("初始化物体选择模型")
    history = 500
    var_threshold = 64
    learn_rate = 0.005
    bg_subtractor = cv.createBackgroundSubtractorMOG2(history, var_threshold, detectShadows=False)
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
    detection_model = ObjectDetection(bg_subtractor, history, learn_rate, kernel)
    detection_model.train_model(cap)

    print("初始化物体追踪模型")
    split_line = 368
    centroid_threshold_square = 1300
    track_model = ObjectTrackKNN(split_line, centroid_threshold_square)

    print("初始化物体计数模型")
    counting_line = split_line + 50
    counting_model = ObjectCounting(counting_line)

    print("初始化物体计数日志")
    counting_log = ObjectCountingLog(split_line)

    print("初始化pipeline")
Example no. 20
config = {
    'host': 'localhost',
    'port': '8000',
    'key': '4TLbvfdfu1AG6fum5kxfEQpSb',
    'plug': 'TensorflowEdgeTPU',
    'mode': 'client',
    'type': 'detector',
    'notice': None,
    'connectionType': 'websocket',
    'label_path': './labelmap.txt',
    'model_path': './edgetpu_model.tflite',
    'confidence': 0.85
}

objectDetection = ObjectDetection(config)


async def cx(x):
    global config
    x.update({'pluginKey': config['key'], 'plug': config['plug']})
    await sio.emit('ocv', x)


@sio.event
async def connect():
    global config
    print('connected to server')
    await cx({
        'f': 'init',
        'plug': config['plug'],
Example no. 21
class TicTacToe:
    def __init__(self, robot_client, grid_size, slope_pos, param, button_pin):

        self.__client = robot_client
        self.__params = param

        self.__objects = []
        self.__grid = None
        self.__grid_size = grid_size

        self.__workspace_name = ""
        self.__observation_pose = None
        self.__global_z_offset = 0.

        self.init_from_yaml()

        self.__object_detector = ObjectDetection(robot_client, self.__observation_pose, self.__workspace_name,
                                                 self.__global_z_offset,
                                                 grid_size)
        self.__robot = RobotPlayer(robot_client, self.__object_detector, self.__grid_size, slope_pos)
        self.__button_pin = button_pin

    def init_from_yaml(self):
        file_path = os.path.join(os.getcwd(), "tic_tac_toe_config.yaml")
        config = load_yaml(file_path)

        self.__workspace_name = config["workspace_name"]
        self.__global_z_offset = config["global_z_offset"]
        self.__observation_pose = config["observation_pose"]

    # put green pieces back in storage and put all other pieces out of the workspace
    def init_game(self):
        self.__objects = self.__object_detector.detect_all_object()
        self.__grid = self.__object_detector.put_objects_on_grid(self.__objects)

        # sort from top left to bottom right
        self.__objects.sort(key=sort_score)
        self.__display_grid()
        i, j, k, l_ = 0, 0, 0, 0
        x, y, z = 0, 0, 0

        while len(self.__objects) > 0:
            obj = self.__objects.pop(0)

            if obj.color == ObjectColor.GREEN and i <= len(self.__grid[0]) and self.__params["slope"] == 0:
                if i <= 3:
                    x, y, z = i, 0, 0
                elif i == 4:
                    x, y, z = i - 1, 1, 0
                elif i > 4:
                    x, y, z = 3, -1, 0
                i += 1

            elif obj.color == ObjectColor.BLUE and l_ == 0 and self.__params["slope"] == 0:
                x, y, z = -0.7, -0.7, 0
                l_ = 1

            else:
                x, y, z = -0.7, 1.5, j
                j += 0.01
                k += 1

            print('pick and place :', x, y, z)
            self.__robot.pick_place_on_grid(self.__grid, obj, x, y, z=z)

    def play_game(self):
        self.init_game()

        self.__objects = self.__object_detector.detect_all_object()
        self.__grid = self.__object_detector.put_objects_on_grid(self.__objects)

        end_game = False
        while not end_game:  # loop for each turn
            grid_o = self.__grid
            # Player's turn
            self.__objects = self.__robot.wait_other_player(self.__params["turn_end"], self.__button_pin)
            self.__grid = self.__object_detector.put_objects_on_grid(self.__objects)
            self.__display_grid()

            self.cheat_detection(grid_o, self.__grid)
            compiled_grid = self.__compile_grid(self.__grid)
            point = self.ai_play(compiled_grid, self.__params["rand"])
            end_game = self.check_end_game(False)
            if end_game:
                return

            # Robot's turn
            self.__robot.robot_play(point[:2], self.__grid, self.__params["slope"])

            self.__objects = self.__object_detector.detect_all_object()
            self.__grid = self.__object_detector.put_objects_on_grid(self.__objects)
            end_game = self.check_end_game(True)

    def check_end_game(self, is_robot_turn):
        compiled_grid = self.__compile_grid(self.__grid)
        if not is_robot_turn and self.ai_win_check(compiled_grid) == -1:
            self.__robot.lose_action()
            return True

        elif is_robot_turn and self.ai_win_check(compiled_grid) == 1:
            self.__robot.win_action()
            return True
        elif self.count_plays(compiled_grid) == 9:
            self.__robot.none_action()
            return True

        return False

    def cheat_detection(self, grid_view_1, grid_view_2):
        grid_1 = self.__compile_grid(grid_view_1)
        grid_2 = self.__compile_grid(grid_view_2)
        change = 0
        for x in range(0, 3):
            for y in range(0, 3):
                if grid_1[x][y] != 0 and grid_2[x][y] == 0:
                    print("a circle has been removed", x, y)
                    self.__robot.cheat_action()
                elif grid_1[x][y] != 0 and grid_2[x][y] != grid_1[x][y]:
                    print("a circle has been replace", x, y)
                    self.__robot.cheat_action()
                elif grid_1[x][y] == 0 and grid_2[x][y] == -1:
                    change += 1
                    if change > 1:
                        print("player has play multiple time")
                        self.__robot.cheat_action()

    # count how many pieces are on the play grid
    @staticmethod
    def count_plays(compiled_grid):
        tot = 0
        for x in range(0, 3):
            for y in range(0, 3):
                tot += abs(compiled_grid[x][y])
        return tot

    # take a grid and check for winner
    # return the value of the winner (-1 = human / 1 = AI)
    @staticmethod
    def ai_win_check(compiled_grid):
        for case in compiled_grid:
            if case[0] == case[1] == case[2] != 0:
                return case[0]
        for x in range(0, 3):
            if compiled_grid[0][x] == compiled_grid[1][x] == compiled_grid[2][x] != 0:
                return compiled_grid[0][x]
        if compiled_grid[0][0] == compiled_grid[1][1] == compiled_grid[2][2] != 0:
            return compiled_grid[0][0]
        if compiled_grid[0][2] == compiled_grid[1][1] == compiled_grid[2][0] != 0:
            return compiled_grid[0][2]
        return 0

    # in the work space: BLUE = AI, RED = human
    def ai_play(self, compiled_grid, randomness):
        score_grid = [
            [0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0]
        ]
        for x in range(3):
            for y in range(3):
                score_grid[x][y] = random.randint(0, randomness * 100) / 100000.0
        # Fill the score grid
        for x in range(0, 3):
            for y in range(0, 3):
                if compiled_grid[x][y] == 0:
                    compiled_grid[x][y] = 1
                    score_grid[x][y] += self.ai_recursive(compiled_grid, -1)
                    compiled_grid[x][y] = 0
                else:
                    score_grid[x][y] = -9 ** 9 - 1
        # take the best score from the grid
        best_x, best_y = 0, 0
        best = -128
        for x in range(0, 3):
            for y in range(0, 3):
                if score_grid[x][y] > best:
                    best = score_grid[x][y]
                    best_x, best_y = x, y

        print("AI evaluation ...")
        print("AI pLays: " + str([best_x, best_y]))
        return [best_x, best_y, best]

    # recursive scan for the best play and return a score for each grid (1.0 = win, 0.0 = draw, -1.0 = lose)
    def ai_recursive(self, compiled_grid, turn):
        win = self.ai_win_check(compiled_grid)
        if win != 0:
            return win
        score = [0, 0, 0]
        # lose, equal, win
        for x in range(0, 3):
            for y in range(0, 3):
                if compiled_grid[x][y] == 0:
                    compiled_grid[x][y] = turn
                    tmp = self.ai_recursive(compiled_grid, -turn)
                    if tmp > 0:
                        score[turn + 1] += tmp
                    elif tmp == 0:
                        score[1] += 1
                    else:
                        score[-turn + 1] += -tmp
                    compiled_grid[x][y] = 0
        if score[2] > 0:
            return turn * score[2] / 9
        elif score[1] > 0:
            return 0
        elif score[0] > 0:
            return -turn * score[0] / 9
        return 0

    # compile view_grid into a grid (0 = empty space, 1 = green circle, -1 = blue circle)
    def __compile_grid(self, grid):
        game_grid = [
            [0, 0, 0],
            [0, 0, 0],
            [0, 0, 0]
        ]
        if self.__params["slope"] == 1:
            grid = copy.deepcopy(grid)
            for x in range(0, 3):
                grid[x].insert(0, 0)

        for x in range(0, 3):
            for y in range(0, 3):
                if len(grid[x][y + 1]) > 0:
                    if grid[x][y + 1][0].color == ObjectColor.GREEN:
                        game_grid[x][y] = 1
                    elif grid[x][y + 1][0].color == ObjectColor.BLUE:
                        game_grid[x][y] = -1
        return game_grid

    # get an array of class Object and put it on a grid
    def __display_grid(self):
        rgb_dict = {ObjectColor.RED.value: " R ", ObjectColor.GREEN.value: " G ", ObjectColor.BLUE.value: " B "}
        case = []
        for case in self.__grid:
            print("+" + "---+" * len(case) + "\n|", end="")
            for objs in case:
                if len(objs) > 0:
                    print(rgb_dict[objs[0].color.value] if objs[0].color.value in rgb_dict else "", end="")
                else:
                    print("   ", end='')
                print("|", end='')
            print("")
        print("+" + "---+" * len(case))
Example no. 22
 def __init__(self):
     self.obj_detect = ObjectDetection()
     self.detect_face = FaceDetection()
     self.reconize_face = FaceRecognition()
Example no. 23
# shared variables and their respective mutual exclusion mechanisms
nav_status = None
nav_status_lock = threading.Lock()

command = 6 * [0]
command_lock = threading.Lock()

# global variables
mission_running = True
mission_mode = True
blocking = True

thread1 = None
thread2 = None

obj_detection = ObjectDetection()

# initializing serial connection with AUV_simulator
simulator_com = SerialConnection(port='com1', baud=115200)
simulator_com.start()

# wait for the first not empty message from AUV_simulator
while nav_status is None:
    receive_nav_status()

# threads running
start_threads()

# wait for threads termination
wait_for_threads_termination()
Example no. 24
def main():
    # test pmbm tracking in val split of NuScenes
    detection_file = '/home/mqdao/Downloads/nuScene/detection-megvii/megvii_val.json'
    data_root = '/home/mqdao/Downloads/nuScene/v1.0-trainval'
    version = 'v1.0-trainval'

    nusc = NuScenes(version=version, dataroot=data_root, verbose=True)

    # load detection
    with open(detection_file) as f:
        data = json.load(f)
    all_results = EvalBoxes.deserialize(data['results'], DetectionBox)
    meta = data['meta']
    print('meta: ', meta)
    print("Loaded results from {}. Found detections for {} samples.".format(
        detection_file, len(all_results.sample_tokens)))
    # to filter detection
    all_score_theshold = [0.35, 0.3, 0.25, 0.2]

    # init tracking results
    tracking_results = {}

    processed_scene_tokens = set()
    for sample_token_idx in tqdm(range(len(all_results.sample_tokens))):
        sample_token = all_results.sample_tokens[sample_token_idx]
        scene_token = nusc.get('sample', sample_token)['scene_token']
        if scene_token in processed_scene_tokens:
            continue

        # initialize filter
        config = FilterConfig(state_dim=6, measurement_dim=3)
        density = get_gaussian_density_NuScenes_CV()
        pmbm_filter = PoissonMultiBernoulliMixture(config, density)

        current_sample_token = nusc.get('scene',
                                        scene_token)['first_sample_token']
        current_time_step = 0

        while current_sample_token != '':
            # filter detections with low detection score
            sample_record = nusc.get('sample', current_sample_token)
            gt_num_objects = len(sample_record['anns'])
            filtered_detections = []
            i_threshold = 0
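            # lower the score threshold step by step until enough detections survive
            # (or the list of thresholds is exhausted)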
            while len(filtered_detections
                      ) < gt_num_objects and i_threshold < len(
                          all_score_theshold):
                filtered_detections = [
                    detection
                    for detection in all_results.boxes[current_sample_token]
                    if detection.detection_score >=
                    all_score_theshold[i_threshold]
                ]
                i_threshold += 1

            # create measurement for pmbm filter
            measurements = []
            for detection in filtered_detections:
                # get obj_type
                if detection.detection_name not in NUSCENES_TRACKING_NAMES:
                    continue
                obj_type = detection.detection_name
                # get object pose
                x, y, z = detection.translation
                quaternion = Quaternion(detection.rotation)
                yaw = quaternion.angle if quaternion.axis[
                    2] > 0 else -quaternion.angle
                # get object size
                size = list(detection.size)
                # get detection score
                score = detection.detection_score
                # create object detection
                obj_det = ObjectDetection(z=np.array([x, y,
                                                      yaw]).reshape(3, 1),
                                          size=size,
                                          obj_type=obj_type,
                                          height=z,
                                          score=score,
                                          empty_constructor=False)
                measurements.append(obj_det)

            # print('Time {} - Number of measurements: {}'.format(current_time_step, len(measurements)))

            # initialize tracking results for this sample
            tracking_results[current_sample_token] = []

            # invoke filter and extract estimation
            if len(measurements) > 0:
                estimation = pmbm_filter.run(measurements)
                # log estimation
                for target_id, target_est in estimation.items():
                    sample_result = format_result(
                        current_sample_token,
                        target_est['translation'] + [target_est['height']],
                        target_est['size'], target_est['orientation'],
                        target_est['velocity'], target_id, target_est['class'],
                        target_est['score'])
                    tracking_results[current_sample_token].append(
                        sample_result)

            # move on
            current_sample_token = sample_record['next']
            current_time_step += 1

        processed_scene_tokens.add(scene_token)

    # save tracking result
    output_data = {'meta': meta, 'results': tracking_results}
    with open(
            './estimation-result/all-results-validataion-set-{}.json'.format(
                datetime.now().strftime("%Y%m%d-%H%M%S")), 'w') as outfile:
        json.dump(output_data, outfile)
Example no. 25
class Controller(Observer):
    """This class manages all the modules"""
    def __init__(self):
        """Creates all the modules"""
        print("class: Controller created.")

        self.stances = ["default", "arm", "build", "dance", "battle_stance", ""]
        self.stance = "default"

        self.args = "|"
        self.using_joysticks = False
        self.speed1_x = 0
        self.speed1_y = 0
        self.speed2_x = 0
        self.speed2_y = 0
        self.arm = Arm()
        self.builder = Builder()
        self.dancing = Dancing()
        self.direction = ObjectDetection()
        self.movement = Movement()
        self.sound = SoundRecognition()
        self.reset = False
        self.drive = False

        # initialize bluetooth_connection connection
        self.host = connect.Connect(1, "host", 'B8:27:EB:36:3E:F8', 4, self)
        self.host.start()
        time.sleep(20)
        self.client = send.Send(2, "client", 'B8:27:EB:DE:5F:36', 5)
        self.client.start()
        time.sleep(10)

        self.msg_received = False
        # Add controller as observer to classes
        self.arm.addObserver(self)
        self.builder.addObserver(self)
        self.dancing.addObserver(self)
        self.direction.addObserver(self)
        self.movement.addObserver(self)
        self.sound.addObserver(self)

        self.robot = robot.Robot(6, 3)

        self.list = [self.arm, self.builder, self.dancing, self.direction, self.movement, self.sound]



        # self.controls()
        # self.joystick_controls()

    def has_received(self):
        try:
            self.client.controller_input("received")
        except:
            self.client = send.Send(2, "client", 'B8:27:EB:DE:5F:36', 5)
            self.client.start()
            time.sleep(10)
            self.has_received()

    def update(self, observable, arg):
        """Updates the modules"""
        print("\nUpdate executed")
        print(observable, "\narg:", arg)
        print(observable.__class__)

        if arg[0] == 0:
            arg[0] = "manual"
        elif arg[0] == 1:
            arg[0] = "battlestance"
        elif arg[0] == 2:
            arg[0] = "dab"
        elif arg[0] == 3:
            arg[0] = "ball"
        elif arg[0] == 4:
            arg[0] = "reset"
        elif arg[0] == 5:
            arg[0] = "dance"
        elif arg[0] == 6:
            arg[0] = "linedance"
        elif arg[0] == 7:
            arg[0] = "highstep"
        elif arg[0] == 8:
            arg[0] = "lowstep"
        elif arg[0] == 9:
            arg[0] = "drive"

        self.has_received()

        print("arg[0] converted:", arg[0])

        self.joystick_contsrols(arg)

    def execute(self, arg):
        if arg == "arm":
            self.arm.command()
        elif arg == "builder":
            self.builder.command()
        elif arg == "dancing":
            self.dancing.command()
        elif arg == "direction":
            self.direction.command()
        elif arg == "movement":
            self.movement.command()
        elif arg == "sound":
            self.sound.command()
        elif arg == "exit" or arg == "quit":
            print("closing program...")
            exit()

    def joystick_contsrols(self, args):
        """Commands the controls"""
        joyval_float1_x = args[1]
        joyval_float1_y = args[2]
        button1 = args[3] # Left Joystick
        joyval_float2_x = args[4]
        joyval_float2_y = args[5]
        button2 = args[6] # Right Joystick

        self.speed1_x = abs(200*joyval_float1_x)
        self.speed1_y = abs(200*joyval_float1_y)
        self.speed2_x = abs(200*joyval_float2_x)
        self.speed2_y = abs(200*joyval_float2_y)

        # test
        for arg in args:
            print("--", arg)
        if args[0] == "manual":
            if self.reset:
                self.robot.reset()
                self.reset = False
            self.robot.manual(joyval_float1_x, joyval_float1_y, button1, joyval_float2_x, joyval_float2_y, button2)
            self.drive = False
        elif args[0] == "highstep":
            if self.reset:
                self.reset = False
            self.robot.toggleStairs()
            self.robot.manual(joyval_float1_x, joyval_float1_y, button1, joyval_float2_x, joyval_float2_y, button2)
        elif button1 or args[0] == "reset":
            self.robot.reset()
        elif args[0] == "battlestance":
            self.robot.battlestance()
            self.drive = False
        elif args[0] == "drive" or button2:

            self.reset = True
            if not self.drive:
                self.robot.drive(self.drive)
                self.drive = True
            else:
                self.robot.drive(self.drive, joyval_float2_x, joyval_float2_y)

        elif args[0] == "dab":
            self.robot.dab()
        elif args[0] == "dance":
            self.robot.dance()
            self.reset = True
        else:
            print("do something else -> line 166 controller.py")

    def controls(self):
        """Command the controls"""
        while True:
            user_input = input("\nType a command:\n")
            print("\nyou typed: ", user_input)
            user_input = user_input.lower()

            if user_input == "help" or user_input == "h" or user_input == "0":
                self.commands_help(user_input)
            elif user_input == "arm" or user_input == "1":
                self.arm.command()
            elif user_input == "builder" or user_input == "2":
                self.builder.command()
            elif user_input == "dancing" or user_input == "3":
                self.dancing.command()
            elif user_input == "direction" or user_input == "4":
                self.direction.command()
            elif user_input == "movement" or user_input == "5":
                self.movement.command()
            elif user_input == "sound" or user_input == "6":
                self.sound.command()
            elif user_input == "exit" or user_input == "shutdown" or user_input == ".":
                print("closing program...")
                exit()
            else:
                self.commands_help(user_input)

    @staticmethod
    def commands_help(user_input):
        if user_input == "help" or user_input == "h" or user_input == "0":
            print("\nCommands:\n"
                  " [0] help\n"
                  " [1] arm\n"
                  " [2] builder\n"
                  " [3] dancing\n"
                  " [4] detection\n"
                  " [5] movement\n"
                  " [6] sound\n"
                  " [.] exit/shutdown")
        else:
            print("\nThere is no command: ", user_input,
                  "\nCommands:\n"
                  " [0] help\n"
                  " [1] arm\n"
                  " [2] builder\n"
                  " [3] dancing\n"
                  " [4] detection\n"
                  " [5] movement\n"
                  " [6] sound\n"
                  " [.] exit/shutdown")

    def default_stance(self):
        pass
Example no. 26

if __name__ == '__main__':
    print("打开视频")
    video = "./video/3.mp4"
    cap = cv.VideoCapture(video)

    print("初始化物体选择模型")
    history = 500
    var_threshold = 64
    learn_rate = 0.005
    bg_subtractor = cv.createBackgroundSubtractorMOG2(history,
                                                      var_threshold,
                                                      detectShadows=False)
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
    detection_model = ObjectDetection(bg_subtractor, history, learn_rate,
                                      kernel)
    detection_model.train_model(cap)

    print("初始化物体追踪模型")
    split_line = 368
    centroid_threshold_square = 1300
    track_model = ObjectTrackKNN(split_line, centroid_threshold_square)

    print("初始化物体计数模型")
    counting_line = split_line + 50
    counting_model = ObjectCounting(counting_line)

    print("初始化物体计数日志")
    counting_log = ObjectCountingLog(split_line)

    print("初始化pipeline")
Example no. 27
 def create_object_detection(self):
     self.object_detection = ObjectDetection(configuration.WEIGHTS_FILE,
                                             configuration.CLASSES_FILE,
                                             configuration.CONFIG_FILE)
Example no. 28
def game_loop(args):
    fig = plt.figure()
    goal = np.array([-7.530000, 321.210205])
    # plt.plot(goal[0], goal[1], 'ro')
    try:
        client = carla.Client(args.host, args.port)
        client.set_timeout(2.0)
        global world
        world = World(client.get_world(), args)
        # world.vehicle.set_autopilot(True)
        global object_detection
        object_detection = ObjectDetection(world)
        global t1
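        # run object detection concurrently in its own thread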
        t1 = threading.Thread(target=object_detection.detect, name='t1')
        t1.start()

        # Path Planning part
        controller = VehiclePIDController(
            world.vehicle,
            args_lateral={
                'K_P': 1.0,
                'K_D': 0.07,
                'K_I': 0.02
            },
            args_longitudinal={
                'K_P': 1.0,
                'K_D': 0.09,
                'K_I': 0.2
            },
        )
        dwa = DWA(goal)
        config = Config()
        local_state = np.array([
            initial_state['x'], initial_state['y'], (90) * math.pi / 180.0,
            0.0, 0.0
        ])

        # plt.xlabel('Y Direction')
        # plt.ylabel('X Direction')

        # Simulation Starts
        curr_lanes = []
        objects = []
        while True:
            if world.camera_manager.cam_image is not None:
                cv2.imshow('camera image', world.camera_manager.cam_image)

            state = world.vehicle.get_transform()
            local_state[0] = state.location.x
            local_state[1] = state.location.y
            local_state[2] = (state.rotation.yaw) * math.pi / 180.0

            # Lane detection
            if world.camera_manager.seg_image is not None:
                image_lanes = Lanes_Image(world.camera_manager.seg_image)
                lines = image_lanes.get_lines()

                if world.camera_manager.dep_image is not None:
                    z = world.camera_manager.dep_image * 1000
                    x = world.camera_manager.x
                    res = np.zeros((HEIGHT, WIDTH))
                    for line in lines:
                        x1, y1, x2, y2 = line.astype(int)

                        if z[y1][x1] >= 300 or z[y2][x2] >= 300:
                            continue

                        p1 = transform([x[y1][x1], z[y1][x1]], state)
                        p2 = transform([x[y2][x2], z[y2][x2]], state)
                        cv2.line(res, (x1, y1), (x2, y2), (255, 0, 0), 2)

                        # Filtering lanes
                        lane = Lane(p1[0], p1[1], p2[0], p2[1])
                        if lane.type != 'N':
                            idx = lane.find_similar_lane(curr_lanes)
                            if idx == -1:
                                # no similar line
                                curr_lanes.append(lane)
                            else:
                                # similar with idx line
                                curr_lanes[idx] = lane.merge_lane(
                                    curr_lanes[idx])

                    cv2.imshow('result', res)

                    # plt.clf()
                    for obj_cord in object_detection.coordinates:
                        # print(obj_cord[0], obj_cord[1])
                        p = transform([obj_cord[0], obj_cord[1]], state)
                        objects.append(p)

                    print("Current lanes are : {}".format(len(curr_lanes)))
                    print("Current object are : {}".format(len(objects)))

                    # Drawing object and lanes
                    # for lane in curr_lanes:
                    #     if lane.weight > 2:
                    #         # print(lane.x1, lane.x2, lane.y1, lane.y2)
                    #         plt.plot(state.location.x, state.location.y, 'ro')
                    #         plt.plot([lane.x1, lane.x2], [lane.y1, lane.y2])
                    # for obj_cord in objects:
                    #     plt.plot(obj_cord[0], obj_cord[1], 'ro')

            u, predicted_trajectory = dwa.dwa_control(local_state,
                                                      np.array([]), [])
            px = local_state[0]
            py = local_state[1]
            local_state = dwa.motion(local_state, u,
                                     config.dt)  # simulate robot

            # Plotting trajectory of car-robot
            # plt.plot([local_state[0], px], [local_state[1], py])
            destination = carla.Location(
                x=local_state[0],
                y=local_state[1],
                z=state.location.z,
            )

            # move vehicle
            waypoint = world.world.get_map().get_waypoint(destination)
            control_signal = controller.run_step(local_state[3], waypoint)
            world.vehicle.apply_control(control_signal)
            # time.sleep(config.dt/2)

            # edge case to reach at goal
            dist_to_goal = math.hypot(local_state[0] - goal[0],
                                      local_state[1] - goal[1])
            if dist_to_goal <= config.robot_radius:
                print("Goal Reached!!")
                break

            # debug
            # cv2.waitKey(1)
            # plt.pause(0.05)
            # print(self._vehicle.get_location().y - initial_state['y'], self._state[1])
            # break
    finally:
        # plt.show()
        if world is not None:
            print("Destroying World")
            world.destroy()
Example no. 29
class App:
    def __init__(self):
        self._running = True
        self._screen_ = None
        self.size = configuration.SCREEN_WIDTH, configuration.SCREEN_HEIGHT

        self.pressed_left = False
        self.pressed_right = False
        self.pressed_up = False
        self.pressed_down = False
        self.mouse = None
        self.click = False

        self.background = None
        self.walls = []
        self.obstacles = set([])
        self.robot = None

        self.phase = None
        self.button = None
        self.start_position = None
        self.destinations = set([])
        self.object_images = []
        self.pasillo = pygame.image.load('pasillo.png')

        self.trace = []
        self.path_to_objective = []
        self.objective_position = None
        self.objective_name = None
        self.tree = dict()

        self.found_object = None
        self.object_detection = None

    def on_init(self):
        pygame.init()
        self._screen_ = pygame.display.set_mode(
            self.size, pygame.HWSURFACE | pygame.DOUBLEBUF)

        self.background = pygame.Surface(self._screen_.get_size())
        self.background = self.background.convert()
        self.background.fill(configuration.WHITE)

        # self.create_walls()
        self.create_robot()
        self.create_object_detection()

        self._running = True
        self.phase = PHASE.SET_OBSTACLES
        self.button = Button(configuration.BLACK, configuration.BUTTON_X,
                             configuration.BUTTON_Y,
                             configuration.BUTTON_WIDTH,
                             configuration.BUTTON_HEIGHT)

    def on_event(self, event):
        if event.type == pygame.QUIT:
            self._running = False

        elif event.type == pygame.KEYDOWN:  # check for key presses
            if event.key == pygame.K_LEFT:  # left arrow turns left
                self.pressed_left = True
            elif event.key == pygame.K_RIGHT:  # right arrow turns right
                self.pressed_right = True
            elif event.key == pygame.K_UP:  # up arrow goes up
                self.pressed_up = True
            elif event.key == pygame.K_DOWN:  # down arrow goes down
                self.pressed_down = True
            elif event.key == pygame.K_SPACE:  # space toggles automatic movement
                self.robot.automove = not self.robot.automove
                self.robot.auto_move_point = (0, 0)
        elif event.type == pygame.KEYUP:  # check for key releases
            if event.key == pygame.K_LEFT:  # left arrow turns left
                self.pressed_left = False
            elif event.key == pygame.K_RIGHT:  # right arrow turns right
                self.pressed_right = False
            elif event.key == pygame.K_UP:  # up arrow goes up
                self.pressed_up = False
            elif event.key == pygame.K_DOWN:  # down arrow goes down
                self.pressed_down = False

        if event.type == pygame.MOUSEBUTTONDOWN:
            self.click = True
        elif event.type == pygame.MOUSEBUTTONUP:
            self.click = False

        self.mouse = pygame.mouse.get_pressed()[0], pygame.mouse.get_pos(
        )[0], pygame.mouse.get_pos()[1]

    def on_loop(self):
        self.process_phase()

        if self.pressed_left:
            self.robot.rotate(configuration.ROBOT_SPEED)
        if self.pressed_right:
            self.robot.rotate(-configuration.ROBOT_SPEED)
        if self.pressed_up:
            self.robot.move(configuration.ROBOT_SPEED, True)
        if self.pressed_down:
            self.robot.move(configuration.ROBOT_SPEED, False)

        if self.robot.automove:
            self.robot.automove = self.robot.auto_move(
                configuration.ROBOT_SPEED)

        self.robot.update_sensors()
        self.calculate_sensor_intersections()

    def on_render(self):
        self._screen_.blit(self.background, (0, 0))

        pygame.draw.rect(self._screen_, configuration.BLACK,
                         (configuration.GAME_x, configuration.GAME_y,
                          configuration.GAME_width, configuration.GAME_height),
                         configuration.GAME_border)

        # self.draw_walls()
        self.draw_obstacles()

        self.draw_sensor_coverage(self.robot.proximity_sensors)
        self.draw_sensor_intersections(self.robot.proximity_sensors)

        self.draw_destinations()
        self.draw_tree()
        self.draw_path()

        self.robot.draw(self._screen_)

        self.draw_ui()

        pygame.display.flip()

    def on_cleanup(self):
        pygame.quit()

    def on_execute(self):
        if self.on_init() is False:
            self._running = False

        while self._running:
            for event in pygame.event.get():
                self.on_event(event)
            self.on_loop()
            self.on_render()
        self.on_cleanup()

    def create_walls(self):
        self.walls.append(Wall(20, 20, 20, 100, configuration.RED))
        self.walls.append(Wall(60, 350, 120, 200, configuration.RED))
        self.walls.append(Wall(300, 20, 20, 20, configuration.RED))
        self.walls.append(Wall(400, 50, 300, 120, configuration.RED))
        self.walls.append(Wall(500, 60, 500, 220, configuration.RED))
        self.walls.append(Wall(100, 500, 500, 320, configuration.RED))
        self.walls.append(Wall(300, 400, 100, 120, configuration.RED))

    def create_robot(self):
        proximity_sensors = [
            ProximitySensor(0, configuration.ROBOT_HEIGHT,
                            configuration.SENSOR_ANGLE_RANGE,
                            configuration.SENSOR_DISTANCE,
                            configuration.SENSOR_1_ORIENTATION),
            ProximitySensor(0, -configuration.ROBOT_WIDTH,
                            configuration.SENSOR_ANGLE_RANGE,
                            configuration.SENSOR_DISTANCE,
                            configuration.SENSOR_2_ORIENTATION),
            ProximitySensor(configuration.ROBOT_WIDTH, 0,
                            configuration.SENSOR_ANGLE_RANGE,
                            configuration.SENSOR_DISTANCE,
                            configuration.SENSOR_3_ORIENTATION)
        ]

        self.robot = Robot(configuration.ROBOT_INITIAL_POS_X,
                           configuration.ROBOT_INITIAL_POS_Y,
                           configuration.ROBOT_WIDTH,
                           configuration.ROBOT_HEIGHT, proximity_sensors)

    def create_destination(self, p, image_path):
        x, y = p
        image = pygame.image.load(image_path)
        dest = Destination(x, y, configuration.GREEN,
                           configuration.DESTINATION_WIDTH, image, image_path)
        self.destinations.add(dest)

    def create_object_detection(self):
        self.object_detection = ObjectDetection(configuration.WEIGHTS_FILE,
                                                configuration.CLASSES_FILE,
                                                configuration.CONFIG_FILE)

    def process_phase(self):

        clicked = self.mouse[0]
        m_x, m_y = self.mouse[1], self.mouse[2]

        if self.click and point_inside_rect(self.button.x, self.button.y,
                                            self.button.width,
                                            self.button.height, m_x, m_y):
            self.change_phase()

        if self.phase == PHASE.SET_OBSTACLES:

            if clicked and self.point_inside_game(m_x, m_y):
                self.obstacles.add(Obstacle(m_x, m_y, configuration.BLACK, 10))

        elif self.phase == PHASE.SET_START:

            if clicked and self.point_inside_game(m_x, m_y):
                self.start_position = (m_x, m_y)
                self.robot.x, self.robot.y = self.start_position

        elif self.phase == PHASE.SET_DESTINATIONS:
            n_destinations = len(self.destinations)
            if (self.click and self.point_inside_game(m_x, m_y)
                    and n_destinations < configuration.N_DESTINATIONS):
                self.create_destination(
                    (m_x, m_y),
                    configuration.DESTINATION_IMAGE_PATHS[n_destinations])
                self.click = False

        elif self.phase == PHASE.CHOOSE_DESTINATION:

            if not self.objective_name:

                voice_data = speech_recognition_module.record_audio()
                word = speech_recognition_module.respond(voice_data)
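                # Map the recognized Spanish word to the English label used by the object detector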

                if word == "manzana":
                    print("voy a por la manzana")
                    self.objective_name = 'apple'
                elif word == "botella":
                    print("voy a por la botella")
                    self.objective_name = 'bottle'
                elif word == "libro":
                    print("voy a por el libro")
                    self.objective_name = 'book'
                elif word == "taza":
                    print("voy a por la taza")
                    self.objective_name = 'cup'
                else:
                    print("repite")
                    time.sleep(0.5)

        elif self.phase == PHASE.NAVIGATION:
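            # RRT step: sample a random point inside the arena, try to extend the
            # tree one STEP towards it, and record each new node's parent so the
            # path can be rebuilt once a destination is reached.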
            random_point = random_point_within_rect(
                configuration.GAME_x + configuration.GAME_border,
                configuration.GAME_y + configuration.GAME_border,
                configuration.GAME_width - configuration.GAME_border,
                configuration.GAME_height - configuration.GAME_border)

            is_valid_point, parent_node, next_point = navigation.rrt(
                random_point, self.tree, self.obstacles, configuration.STEP)

            if is_valid_point and next_point:
                next_point = (next_point[0], next_point[1])

                # Add the new node to the tree if it has not been visited yet
                if next_point not in self.tree:
                    self.tree[next_point] = parent_node

                # Check found objective
                destination = self.check_reached_objective(next_point)
                if destination:

                    pygame.display.update(
                        self._screen_.blit(
                            pygame.transform.scale(
                                destination.image,
                                (configuration.CAMERA_WIDTH,
                                 configuration.CAMERA_HEIGHT)),
                            (configuration.CAMERA_X, configuration.CAMERA_Y)))

                    destination_label = self.identify_object(
                        destination.image_path)

                    if destination_label == self.objective_name:
                        self.objective_position = next_point
                        self.destinations.clear()
                        self.destinations = [destination]
                        self.change_phase()
                    else:
                        self.found_object = None
                        self.destinations.remove(destination)

        elif self.phase == PHASE.GO_DESTINATION:
            if self.path_to_objective:
                if self.robot.automove:
                    self.robot.automove = self.robot.auto_move(
                        configuration.ROBOT_SPEED)
                else:
                    self.robot.auto_move_point = self.path_to_objective.pop()
                    self.robot.automove = True

        elif self.phase == PHASE.RETURN_TO_START:
            if self.trace:
                if self.robot.automove:
                    self.robot.automove = self.robot.auto_move(
                        configuration.ROBOT_SPEED)
                else:
                    self.robot.auto_move_point = self.trace.pop(0)
                    self.robot.automove = True
            else:
                self.change_phase()

        elif self.phase == PHASE.DELIVER_OBJECT:
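            # Announce the delivery in Spanish: "Aquí tiene su ..." = "Here is your ..."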
            speech_recognition_module.speak("Aquí tiene su " +
                                            str(self.objective_name))

    def change_phase(self):

        self.phase += 1
        self.click = False

        if self.phase == PHASE.SET_OBSTACLES:
            self.button.colour = configuration.RED

        elif self.phase == PHASE.SET_START:
            self.button.colour = configuration.GREEN

        elif self.phase == PHASE.SET_DESTINATIONS:
            self.button.colour = configuration.BLUE
            self.tree[self.start_position] = self.start_position

        elif self.phase == PHASE.CHOOSE_DESTINATION:
            self.button.colour = configuration.GREEN

        elif self.phase == PHASE.NAVIGATION:
            self.button.colour = configuration.BLACK

        elif self.phase == PHASE.GO_DESTINATION:
            self.button.colour = configuration.ORANGE

            # build path to objective
            curr_pos = self.objective_position
            self.trace.append(curr_pos)

            while curr_pos != self.start_position:
                curr_pos = self.tree[curr_pos]
                self.trace.append(curr_pos)

            self.path_to_objective = self.trace.copy()
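            # trace is ordered objective -> start, so GO_DESTINATION pops waypoints
            # from the end (nearest the start) while RETURN_TO_START later pops from
            # the front (starting at the objective).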

        elif self.phase == PHASE.RETURN_TO_START:
            self.button.colour = configuration.BLACK
            self.robot.carry_object_image = self.destinations[0].image
            self.destinations.clear()

        else:
            self.button.colour = configuration.YELLOW

    def calculate_sensor_intersections(self):
        walls = [(wall.point1, wall.point2) for wall in self.walls]
        obstacles = [(obstacle.x, obstacle.y, obstacle.width)
                     for obstacle in self.obstacles]
        for sensor in self.robot.proximity_sensors:
            sensor.calculate_intersections(walls)
            sensor.calculate_intersections(obstacles)

    def identify_object(self, image_path):
        return self.object_detection.get_prediction(image_path)

    # Is the given point inside the game area?
    def point_inside_game(self, x, y):
        return point_inside_rect(configuration.GAME_x, configuration.GAME_y,
                                 configuration.GAME_width,
                                 configuration.GAME_height, x, y)

    def check_reached_objective(self, point):
        for dest in self.destinations:
            if point_inside_rect(dest.x - dest.width / 2,
                                 dest.y - dest.width / 2, dest.width,
                                 dest.width, point[0], point[1]):
                return dest
        return False

    def draw_walls(self):
        for wall in self.walls:
            wall.draw(self._screen_)

    def draw_obstacles(self):
        for obstacle in self.obstacles:
            obstacle.draw(self._screen_)

    def draw_sensor_coverage(self, proximity_sensors):
        for prox_sensor in proximity_sensors:
            prox_sensor.draw_coverage(self._screen_, configuration.ORANGE)

    def draw_sensor_intersections(self, proximity_sensors):
        for prox_sensor in proximity_sensors:
            prox_sensor.draw_intersections(self._screen_, configuration.RED,
                                           configuration.YELLOW)

    def draw_destinations(self):
        for dest in self.destinations:
            dest.draw(self._screen_)

    def draw_tree(self):
        for key, value in self.tree.items():
            pygame.draw.line(self._screen_, configuration.BLUE, key, value)

    def draw_path(self):
        for p in self.trace:
            pygame.draw.line(self._screen_, configuration.GREEN, p,
                             self.tree[p], 3)

    def draw_ui(self):

        self.button.draw(self._screen_)
        self.button.draw_text(self._screen_, configuration.WHITE)

        self.draw_text_center_rect(
            self._screen_, configuration.BLACK, 'Sensor (Right) Distance: ' +
            str(round(self.robot.proximity_sensors[0].detected_distance, 2)), [
                configuration.CHATBOX_X - 100, configuration.CHATBOX_Y,
                configuration.PHASE_DESCRIPTION_WIDTH, 15
            ])
        self.draw_text_center_rect(
            self._screen_, configuration.BLACK, 'Sensor (Left) Distance: ' +
            str(round(self.robot.proximity_sensors[1].detected_distance, 2)), [
                configuration.CHATBOX_X - 100, configuration.CHATBOX_Y + 15,
                configuration.PHASE_DESCRIPTION_WIDTH, 15
            ])
        self.draw_text_center_rect(
            self._screen_, configuration.BLACK, 'Sensor (Center) Distance: ' +
            str(round(self.robot.proximity_sensors[2].detected_distance, 2)), [
                configuration.CHATBOX_X - 100, configuration.CHATBOX_Y + 30,
                configuration.PHASE_DESCRIPTION_WIDTH, 15
            ])

        image = None
        if self.found_object:
            image = self.found_object.image
        else:
            image = self.pasillo

        self._screen_.blit(
            pygame.transform.scale(
                image,
                (configuration.CAMERA_WIDTH, configuration.CAMERA_HEIGHT)),
            (configuration.CAMERA_X, configuration.CAMERA_Y))

        self.draw_text_center_rect(
            self._screen_, configuration.BLACK,
            configuration.PHASE_DESCRIPTION[self.phase - 1], [
                configuration.PHASE_DESCRIPTION_X,
                configuration.PHASE_DESCRIPTION_Y,
                configuration.PHASE_DESCRIPTION_WIDTH,
                configuration.PHASE_DESCRIPTION_HEIGHT
            ])

        self.draw_text_center_rect(
            self._screen_, configuration.BLACK, 'CAMERA', [
                configuration.CAMERA_LABEL_X, configuration.CAMERA_LABEL_Y,
                configuration.CAMERA_LABEL_WIDTH,
                configuration.CAMERA_LABEL_HEIGHT
            ])
        self.draw_text_center_rect(
            self._screen_, configuration.BLUE,
            configuration.PHASE_TITLE[self.phase - 1], [
                configuration.PHASE_DESCRIPTION_LABEL_X,
                configuration.PHASE_DESCRIPTION_LABEL_Y,
                configuration.PHASE_DESCRIPTION_LABEL_WIDTH,
                configuration.PHASE_DESCRIPTION_LABEL_HEIGHT
            ])

    # Render text centered inside the given rect
    def draw_text_center_rect(self, screen, color, text, rect, font_size=18):
        font = pygame.font.SysFont("Calibri", font_size)
        text = font.render(text, True, color)
        textRect = text.get_rect()
        textRect.center = (rect[0] + rect[2] / 2, rect[1] + rect[3] / 2)
        screen.blit(text, textRect)
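
The App above delegates tree growth to a navigation.rrt helper that is not included in this snippet. Judging only from how it is called in process_phase (nearest-node lookup, fixed-step extension, obstacle rejection, and an (is_valid_point, parent_node, next_point) return value), a minimal sketch could look like the following; the helper name, the circular collision test against Obstacle(x, y, width), and the float coordinates are assumptions rather than the project's actual implementation.

import math

def rrt(random_point, tree, obstacles, step):
    """One RRT extension towards random_point (a sketch, not the real module)."""
    if not tree:
        return False, None, None

    # Nearest existing tree node to the sampled point
    parent = min(tree, key=lambda node: math.dist(node, random_point))

    dx, dy = random_point[0] - parent[0], random_point[1] - parent[1]
    distance = math.hypot(dx, dy)
    if distance == 0:
        return False, parent, None

    # Advance one fixed step from the nearest node towards the sample
    next_point = (parent[0] + dx / distance * step,
                  parent[1] + dy / distance * step)

    # Reject the step if it falls inside any obstacle (treated as a circle here)
    for obstacle in obstacles:
        if math.dist(next_point, (obstacle.x, obstacle.y)) <= obstacle.width:
            return False, parent, None

    return True, parent, next_point
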
Esempio n. 30
0
#%%

if __name__ == '__main__':

    instance = ObjectDetection(
        working_dir='.',
        import_image_path='./aoi_image.tif',
        download_image=False,
        aoi_path=None,
        username='',
        password='',
        date_range=('20190101', '20200101'),
        mask_path='./aoi_mask.geojson',
        # mask_path=None,
        out_dir='.',
        step=4,
        prediction_threshold=0.90,
        pos_chip_dir='./positive_chips/',
        neg_chip_dir='./negative_chips/',
        chip_width=20,
        chip_height=20,
        augment_pos_chips=True,
        augment_neg_chips=False,
        save_model=False,
        import_model=None,
        segmentation=False)

    instance.execute()
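
The argument names above suggest that execute() slides a chip_width x chip_height window across the AOI raster in strides of step pixels and keeps the windows whose model score clears prediction_threshold. A minimal sketch of such a scan is shown below; scan_image, the image array, and model.predict are placeholders assumed for illustration, not part of the class shown here.

import numpy as np

def scan_image(image, model, chip_width=20, chip_height=20, step=4,
               prediction_threshold=0.90):
    """Slide a chip-sized window over `image` and collect positive windows.

    `image` is an (H, W, bands) array and `model.predict` is assumed to accept
    a batch of chips and return one probability per chip; both are stand-ins
    for whatever ObjectDetection uses internally.
    """
    height, width = image.shape[:2]
    detections = []
    for row in range(0, height - chip_height + 1, step):
        for col in range(0, width - chip_width + 1, step):
            chip = image[row:row + chip_height, col:col + chip_width]
            score = float(model.predict(chip[np.newaxis, ...])[0])
            if score >= prediction_threshold:
                detections.append((row, col, score))
    return detections
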
    
Esempio n. 31
0
class PeopleDetection:
    """
		The class recognizes people from the image and using face recognition detects their name
	"""
    def __init__(self):
        self.obj_detect = ObjectDetection()
        self.detect_face = FaceDetection()
        self.reconize_face = FaceRecognition()

    def detect(self, frame):
        """
			Detect people
			:param frame -- numpy image
			:return -- details of each person(name, x, y, confidence, time_when_detected)
		"""

        details = []
        detections = self.obj_detect.detect(frame)

        # Get the objects detected by ObjectDetection
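        # (SSD-style output layout assumed here: detections[i] = [image_id, class_id,
        #  confidence, x1, y1, x2, y2], with box coordinates normalized to [0, 1])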
        for i in range(detections.shape[0]):
            confidence = detections[i, 2]
            idx = int(detections[i, 1])

            # Keep the detection only if it is a person with sufficient confidence
            if confidence > 0.25 and CLASSES[idx] == "person":

                (h, w) = frame.shape[:2]
                box = detections[i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # Detect faces inside the person sub-frame (usually just one)
                sub_frame = frame[startY:endY, startX:endX, :]
                face_locations = self.detect_face.detect(sub_frame)
                area = float((endX - startX) * (endY - startY)) / (h * w)

                # For each face get the person's name
                for face_location in face_locations:
                    if face_location == ():
                        continue

                    (face_start_x, face_start_y, face_end_x,
                     face_end_y) = face_location
                    if face_end_x + startX > endX:
                        continue

                    # Draw the face
                    cv2.rectangle(
                        frame, (face_start_x + startX, face_start_y + startY),
                        (face_end_x + startX, face_end_y + startY),
                        (255, 0, 0), 2)

                    # Use face recognition to get the person's name and matching score
                    name, score = self.recognize_face.face_recognize(
                        sub_frame, face_location)

                    # Estimate a body box from the face box and clamp it to the person's bounding box
                    face_h = face_end_y - face_start_y
                    face_w = face_end_x - face_start_x
                    y = startY - 15 if startY - 15 > 15 else startY + 15
                    start_x = np.floor(face_start_x - (face_w) / 4 + startX)
                    end_x = np.ceil(face_end_x + (face_w) / 4 + startX)
                    start_y = np.floor(face_end_y + 3 * (face_h) / 16 + startY)
                    end_y = np.ceil(face_end_y + 5 * (face_h) / 2 + startY)
                    t_start_x = int(start_x) if start_x > startX else startX
                    t_start_y = int(start_y) if start_y < endY else endY
                    t_end_x = int(end_x) if end_x < endX else endX
                    t_end_y = int(end_y) if end_y < endY else endY

                    cv2.rectangle(frame, (t_start_x, t_start_y),
                                  (t_end_x, t_end_y), (255, 0, 0), 2)
                    details.append({
                        'name': name,
                        'x': int((startX + endX) / 2),
                        'y': int((startY + endY) / 2),
                        'score': score,
                        # Fraction of the frame area covered by the person
                        'area': area,
                        'time': strftime("%Y-%m-%d %H:%M:%S", gmtime())
                    })

                # Highlight the detected person by brightening the red channel
                # (saturate at 255 instead of letting uint8 values wrap around)
                sub_frame[:, :, 2] = np.clip(
                    sub_frame[:, :, 2].astype(np.int16) + 80, 0, 255).astype(np.uint8)
                cv2.circle(frame, ((startX + endX) // 2, (startY + endY) // 2),
                           3, (0, 255, 0), -1)

        # If no person was detected, append a placeholder entry (the path planner expects at least one)
        if len(details) == 0:
            details.append({
                'name': "No_person",
                'x': int(480),
                'y': int(270),
                'score': 10.0,
                'area': 0.0,
                'time': strftime("%Y-%m-%d %H:%M:%S", gmtime())
            })

        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            sys.exit(0)
        return details
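
PeopleDetection.detect already draws on the frame and shows it with cv2.imshow, so driving it only takes a capture loop. A minimal usage sketch, assuming the class is fed from the default webcam (the camera index and the surrounding script are placeholders):

import cv2

if __name__ == '__main__':
    detector = PeopleDetection()
    capture = cv2.VideoCapture(0)  # default webcam; replace with a video path if needed

    while True:
        grabbed, frame = capture.read()
        if not grabbed:
            break
        # detect() draws the boxes, shows the frame and returns per-person details
        for person in detector.detect(frame):
            print(person['name'], person['x'], person['y'], person['score'])

    capture.release()
    cv2.destroyAllWindows()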