def initPose(self):
    self.pose = pose.Pose()
    self.prev_pose = pose.Pose()
    self.prev_pose.x = 0
    self.prev_pose.y = 0
    self.prev_pose.theta = 0
    self.pose.x = 0
    self.pose.y = 0
    self.pose.theta = 0
Code example #2
    def __draw(self):
        """Draws the world and items in it.
        
           This will draw the markers, the obstacles,
           the robots, their tracks and their sensors
        """
        
        if self.__robot is not None and self.__center_on_robot:
            if self.__orient_on_robot:
                self.__renderer.set_screen_center_pose(self.__robot.get_pose())
            else:
                self.__renderer.set_screen_center_pose(
                    pose.Pose(self.__robot.get_pose().x,
                              self.__robot.get_pose().y, 0.0))

        self.__renderer.clear_screen()
        if self.__draw_supervisors and self.__supervisor is not None:
            self.__supervisor.draw_background(self.__renderer)
        for bg_object in self.__background:
            bg_object.draw(self.__renderer)

        # Draw the robot, tracker and sensors after obstacles
        if self.__show_tracks and self.__tracker is not None:
            self.__tracker.draw(self.__renderer)
        if self.__robot is not None:
            self.__robot.draw(self.__renderer)
            if self.__show_sensors:
                self.__robot.draw_sensors(self.__renderer)

        if self.__draw_supervisors and self.__supervisor is not None:
            self.__supervisor.draw_foreground(self.__renderer)

        # update view
        self.__update_view()
Code example #3
File: batch.py  Project: SteveGeyer/Firefly
def main():
    """Execute the command"""
    parser = argparse.ArgumentParser(
        description='Run image processing over test images')
    parser.add_argument('-i',
                        '--id',
                        help='marker ID to find',
                        required=False,
                        type=int,
                        default=2)
    parser.add_argument('-r',
                        '--results',
                        help='result directory',
                        required=False,
                        default='results')
    parser.add_argument('-s',
                        '--source',
                        help='source directory for images',
                        required=False,
                        default='images')
    args = parser.parse_args()

    # Make sure output directory exists before we start.
    try:
        os.mkdir(args.results)
    except FileExistsError:
        pass

    # Process files.
    p = pose.Pose()
    for name in os.listdir(args.source):
        frame = cv2.imread(args.source + '/' + name)
        p.solve(frame, args.id)
        cv2.imwrite(args.results + '/' + name, p.display_results())
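A note on the loop above: os.listdir returns names in arbitrary order, and cv2.imread returns None for files it cannot decode. A hedged variant of the per-file loop (a sketch, not part of the Firefly project) that sorts the listing and skips non-images:

import os
import cv2

def process_images(p, source, results, marker_id):
    """Run pose detection over every readable image in `source` (sketch)."""
    for name in sorted(os.listdir(source)):    # sorted for deterministic order
        frame = cv2.imread(os.path.join(source, name))
        if frame is None:                      # not a decodable image; skip it
            continue
        p.solve(frame, marker_id)
        cv2.imwrite(os.path.join(results, name), p.display_results())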
Code example #4
    def publish(self):
        if self.controller.at_goal(self.pose, self.goal):
            desired = pose.Pose()
        else:
            desired = self.controller.get_velocity(self.pose, self.goal,
                                                   self.dT)

        # if self.goal is not None \
        #    and (desired.xVel!=0.0 or desired.thetaVel!=0.0):
        #     rospy.loginfo(
        #         'current=(%f,%f,%f) goal=(%f,%f,%f)  xVel=%f thetaVel=%f',
        #         self.pose.x, self.pose.y, self.pose.theta,
        #         self.goal.x, self.goal.y, self.goal.theta,
        #         desired.xVel, desired.thetaVel)

        d = self.controller.get_goal_distance(self.pose, self.goal)
        self.dist_pub.publish(d)

        self.send_velocity(desired.xVel, desired.thetaVel)

        # Forget the goal if achieved.
        if self.controller.at_goal(self.pose, self.goal):
            rospy.loginfo('Goal achieved')
            self.goal = None
            msg = Bool()
            msg.data = True
            self.goal_achieved_pub.publish(msg)
Code example #5
    def run_periodic(self):

        js_left = self.my_robot.js_left
        js_right = self.my_robot.js_right
        spenner = 1
        spenner_button = 1
        tank_button = 3
        qt_button = 2

        if js_left.getRawButton(8):
            self.my_robot.drive.ahrs.reset()
            pose._estimator.current_pose = pose.Pose(0, 0, 0)

        if js_left.getRawButton(spenner_button) or js_right.getRawButton(
                spenner_button):
            spenner = 0.7

        if js_left.getRawButton(tank_button) or js_right.getRawButton(
                tank_button):
            self.my_robot.drive.tank_drive(-spenner * js_left.getY(),
                                           -spenner * js_right.getY())
        elif js_left.getRawButton(qt_button) or js_right.getRawButton(
                qt_button):
            self.my_robot.drive.arcade_drive(
                spenner * mathutils.signed_power(-js_left.getY(), 2),
                spenner * mathutils.signed_power(-js_right.getX(), 2))
        else:
            self.my_robot.drive.radius_drive(
                -mathutils.signed_power(js_left.getY(), 2),
                mathutils.signed_power(js_right.getX(), 2), spenner)
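mathutils.signed_power is not shown in this listing; a common definition, assumed here, raises the magnitude to the given power while preserving the sign, which flattens joystick response near center:

import math

def signed_power(value, power):
    """Assumed behavior of mathutils.signed_power: |value|**power, keeping the sign."""
    return math.copysign(abs(value) ** power, value)

print(signed_power(-0.5, 2))   # -> -0.25, gentler than linear near center
print(signed_power(-1.0, 2))   # -> -1.0, full deflection unchanged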
Code example #6
    def __init__(self, parser):
        """Initialize the flying code."""
        self.pose = pose.Pose()
        self.cmd = None
        self.marker_id = 0
        self.flying = False
        self.armed = False
        self.image_count = 0
        self.height_pid = pid.PID(False)
        self.height_pid.set_set_point(
            20.0)  # We want to get to the height of the gate.
        self.height_pid.set_output_limits(-1.0, 1.0)
        self.height_pid.set_initial_output(-1.0)
        self.height_pid.set_tunings(0.02, 0.0, 0.0, True)
        self.missed_data = 0
        parser.add_argument('-i',
                            '--id',
                            help='marker ID to find',
                            required=False,
                            type=int,
                            default=2)
        parser.add_argument('-t',
                            '--ttyname',
                            help='Serial tty to transmitter.',
                            required=False,
                            default='/dev/ttyACM0')
Code example #7
    def __init__(self, chassis_model):
        self.sim_chassis = chassis_model
        self.current_pose = pose.Pose(0, 0, 0, 0, 0, 0)
        self.leg_controllers = [
            lc.LegController(leg, IK_3DoF, legpose, self.current_pose) for leg,
            legpose in zip(self.sim_chassis.legs, self.sim_chassis.leg_poses)
        ]
        self.step_state = True

        # Shared movement state
        self.current_time = 0
        self.interval_estimate = ESTIMATE_REFRESH_RATE

        # Pose state
        self.pose_deadline = 0
        self.pose_start_time = 0
        self.pose_interpolator = None

        # Step state
        self.step_deadline = 0
        self.step_start_time = 0
        self.current_step = 0

        # Gait: iterable of steps, each step is ((legs to pick up), deadline, progress before next step)
        self.gait = (((0, 2, 4), 1., 1.), ((1, 3, 5), 1., 1.))

        # Current heading/rotation
        self.x_vel = 0
        self.y_vel = 0
        self.r_vel = 0
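The gait tuple above encodes an alternating tripod: legs (0, 2, 4) swing, then (1, 3, 5). Other gaits fit the same ((legs to pick up), deadline, progress) format; a hypothetical ripple gait that moves one leg pair per phase might look like:

# Hypothetical three-phase ripple gait in the same format as self.gait:
# ((legs to pick up), deadline, progress before next step)
ripple_gait = (
    ((0, 3), 1., 1.),
    ((1, 4), 1., 1.),
    ((2, 5), 1., 1.),
)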
Code example #8
	def leaderOdomCallback(self, newPose):
		self.leaderPose = pose.Pose()
		pos = newPose.pose.pose.position
		orientation = newPose.pose.pose.orientation
		self.leaderPose.x = pos.x
		self.leaderPose.y = pos.y
		quaternion = (orientation.x, orientation.y, orientation.z, orientation.w)
		euler = tf.transformations.euler_from_quaternion(quaternion)
		self.leaderPose.theta = euler[2]
Code example #9
    def goalCallback(self, goal):
        self.goal = pose.Pose()
        pos = goal.pose.position
        orientation = goal.pose.orientation
        self.goal.x = pos.x
        self.goal.y = pos.y
        quaternion = (orientation.x, orientation.y, orientation.z,
                      orientation.w)
        euler = tf.transformations.euler_from_quaternion(quaternion)
        self.goal.theta = euler[2]
Code example #10
    def get_angle_pose(self, quaternion_pose):
        q = [quaternion_pose.orientation.x,
             quaternion_pose.orientation.y,
             quaternion_pose.orientation.z,
             quaternion_pose.orientation.w]
        roll, pitch, yaw = euler_from_quaternion(q)

        angle_pose = pose.Pose()
        angle_pose.x = quaternion_pose.position.x
        angle_pose.y = quaternion_pose.position.y
        angle_pose.theta = yaw
        return angle_pose
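For reference, the yaw that euler_from_quaternion returns as euler[2] can be computed directly from the quaternion components; a self-contained sketch of that math (standard ZYX convention, illustrative names):

import math

def yaw_from_quaternion(x, y, z, w):
    """Yaw (rotation about Z) of a unit quaternion, ZYX Euler convention."""
    return math.atan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))

# Quarter turn about Z: q = (0, 0, sin(pi/4), cos(pi/4))
print(yaw_from_quaternion(0.0, 0.0, math.sin(math.pi / 4), math.cos(math.pi / 4)))
# -> 1.5707963... (pi/2)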
Code example #11
    def __init__(self,
                 leg_model,
                 ik_func,
                 pose=pose.Pose(0, 0, 0, 0, 0, 0),
                 base_pose=pose.Pose(0, 0, 0, 0, 0, 0)):
        self.sim_leg = leg_model
        self.ik_func = ik_func
        self.pose = pose
        self.base_pose = base_pose
        self.to_frame_mat = self.pose.to_frame_mat.dot(
            self.base_pose.to_frame_mat)
        self.from_frame_mat = self.base_pose.from_frame_mat.dot(
            self.pose.from_frame_mat)
        self.home_point = self.leg_to_global(2.5, 0, 0)

        # Movement state
        self.deadline = 0
        self.current_time = 0
        self.interval_estimate = ESTIMATE_REFRESH_RATE
        self.dest = None
        self.move_interpolator = None
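The two matrix products above compose transforms in opposite orders, which is what keeps to_frame_mat and from_frame_mat mutual inverses, assuming each pose's own to/from matrices are inverse transforms. A small numpy check of that identity (illustrative 2-D rotations, not the project's matrices):

import numpy as np

def rot(theta):
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s], [s, c]])

A, B = rot(0.3), rot(0.7)          # stand-ins for pose and base_pose
to_frame = A.T.dot(B.T)            # pose.to_frame . base_pose.to_frame
from_frame = B.dot(A)              # base_pose.from_frame . pose.from_frame
print(np.allclose(to_frame.dot(from_frame), np.eye(2)))   # -> True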
Code example #12
    def __init__(self, legs, leg_poses):
        self.legs = legs
        self.leg_poses = leg_poses
        self.num_legs = len(self.legs)
        self.origin = pose.Pose(0, 0, 0, 0, 0, 0)
        if self.num_legs > 1:
            leg_points = [(lpose.x, lpose.y, lpose.z, 1.)
                          for lpose in leg_poses]
            self.base_segments = [(leg_points[x - 1], leg_points[x])
                                  for x in xrange(self.num_legs)]  # Python 2 (xrange)
        else:
            self.base_segments = []
Code example #13
def get_test_chassis():
    import math
    midwidth = 2.
    topwidth = 1.5
    height = 2.5

    top_r_p = pose.Pose(topwidth, height, 0, math.atan2(height, topwidth), 0,
                        0)
    # Argument order fixed to match the other poses (x, y, z, rotation, ...);
    # atan2(0, midwidth) is 0 either way, so the value is unchanged.
    mid_r_p = pose.Pose(midwidth, 0, 0, math.atan2(0, midwidth), 0, 0)
    bot_r_p = pose.Pose(topwidth, -height, 0, math.atan2(-height, topwidth), 0,
                        0)
    bot_l_p = pose.Pose(-topwidth, -height, 0, math.atan2(-height, -topwidth),
                        0, 0)
    mid_l_p = pose.Pose(-midwidth, 0, 0, math.atan2(0, -midwidth), 0, 0)
    top_l_p = pose.Pose(-topwidth, height, 0, math.atan2(height, -topwidth), 0,
                        0)

    top_r = lm.get_test_leg()
    mid_r = lm.get_test_leg()
    bot_r = lm.get_test_leg()
    bot_l = lm.get_test_leg()
    mid_l = lm.get_test_leg()
    top_l = lm.get_test_leg()

    legs = [top_r, mid_r, bot_r, bot_l, mid_l, top_l]
    leg_poses = [top_r_p, mid_r_p, bot_r_p, bot_l_p, mid_l_p, top_l_p]

    return Chassis(legs, leg_poses)
Code example #14
File: pose_factory.py  Project: zhuoyuzhang/RNAMake
    def split_pose_by_chains(self, p):
        ps = []
        for c in p.chains():
            p_new = pose.Pose()
            s = structure.Structure([c.copy()])
            p_new.structure = s  # assign to the new pose, not the input pose
            bps = []
            for bp in p.basepairs:
                # TODO: collect the basepairs that fall within this chain
                pass
            p_new.basepairs = bps
            ps.append(p_new)  # append the per-chain pose

        return ps
Code example #15
File: pose_factory.py  Project: zhuoyuzhang/RNAMake
    def _copy_info_into_pose(self, m):
        p = pose.Pose()
        p.structure = m.structure
        p.ends = m.ends
        p.end_ids = m.end_ids
        p.basepairs = m.basepairs
        p.name = m.name
        p.path = m.path
        p.beads = m.beads
        p.score = m.score
        p.secondary_structure = m.secondary_structure

        return p
Code example #16
    def update(self):
        if self.controller.atGoal(self.pose, self.goal):
            desired = pose.Pose()
            # if self.track_started:
            rospy.loginfo("Goal Reached")
            self.stopMotor.publish(1)
        else:
            desired = self.controller.getVelocity(self.pose, self.goal,
                                                  self.prev_pose)
            self.track_started = 1

        self.prev_pose = self.pose
        twist = Twist()
        twist.linear.x = desired.xVel
        twist.angular.z = desired.thetaVel
        self.twistPub.publish(twist)
Code example #17
def main():
    frame_size = (1280, 720)
    voxel_resolution = (12, 24)

    # Projection found by findboard
    projection = pose.Projection(
        pose.CameraIntrinsics(
            cameraMatrix=numpy.float32([
                [887.09763773, 0., 639.5],
                [0., 887.09763773, 359.5],
                [0., 0., 1.],
            ]),
            distCoeffs=numpy.float32([0., 0., 0., 0., 0.]),
        ),
        pose.Pose(rvec=numpy.float32([
            [1.32300998],
            [-1.32785091],
            [1.14510022],
        ]),
                  tvec=numpy.float32([
                      [3.58316198],
                      [3.06215196],
                      [10.00036672],
                  ])),
    )

    heatmaps = get_piece_heatmaps(frame_size, voxel_resolution, projection)
    reference_heatmap = get_reference_heatmap(heatmaps)
    occlusions = get_occlusions(heatmaps, projection)
    board = chess.Board()
    projection_shape = tuple(reversed(frame_size))
    negative_composite_memo = {(): Heatmap.blank(projection_shape)}

    subtractor = cv2.imread('diff.png')[:, :, 0]
    normalized_subtractor = subtractor / subtractor.std()  # numpy arrays use .std(), not .stdev()

    move_diffs = get_move_diffs(heatmaps, reference_heatmap, occlusions,
                                negative_composite_memo, board)
    for (move, move_diff) in move_diffs.items():
        # The Pearson correlation coefficient measures the goodness of fit
        score = (move_diff * normalized_subtractor).mean()
        print('score', board.san(move), score)
        composite = numpy.zeros((720, 1280, 3), dtype=numpy.float32)
        composite[:, :, 2] += move_diff
        composite[:, :, 1] += normalized_subtractor
        cv2.imshow(WINNAME, composite / 20)
        key = cv2.waitKey(0)
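The comment above calls the score a Pearson correlation: when both arrays are centered and scaled to unit variance, averaging their elementwise product gives exactly the correlation coefficient. A standalone numpy check on synthetic data:

import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=100000)
b = 0.8 * a + 0.6 * rng.normal(size=100000)   # true correlation 0.8

an = (a - a.mean()) / a.std()                 # center and scale
bn = (b - b.mean()) / b.std()
print(np.mean(an * bn))                       # ~0.8
print(np.corrcoef(a, b)[0, 1])                # matches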
Code example #18
    def __draw(self):
        """Draws the world and items in it.
        
           This will draw the markers, the obstacles,
           the robots, their tracks and their sensors
        """

        if self.__robots and self.__center_on_robot:
            # Temporary fix - center onto first robot
            robot = self.__robots[0]
            if self.__orient_on_robot:
                self.__renderer.set_screen_center_pose(robot.get_pose())
            else:
                self.__renderer.set_screen_center_pose(
                    pose.Pose(robot.get_pose().x,
                              robot.get_pose().y, 0.0))

        self.__renderer.clear_screen()

        if self.__draw_supervisors:
            for supervisor in self.__supervisors:
                supervisor.draw_background(self.__renderer)

        for bg_object in self.__background:
            bg_object.draw(self.__renderer)
        for obstacle in self.__obstacles:
            obstacle.draw(self.__renderer)

        # Draw the robots, trackers and sensors after obstacles
        if self.__show_tracks:
            for tracker in self.__trackers:
                tracker.draw(self.__renderer)
        for robot in self.__robots:
            robot.draw(self.__renderer)
            if self.__show_sensors:
                robot.draw_sensors(self.__renderer)

        if self.__draw_supervisors:
            for supervisor in self.__supervisors:
                supervisor.draw_foreground(self.__renderer)

        # update view
        self.__update_view()
Code example #19
def getKeypoints(folder):
    """ Generates keypoints (frames with poses) from openpose output """
    keypointFiles = sorted(glob.glob(folder + "/*.json"))  # glob order is arbitrary; sort so index i follows frame order
    keypoints = []
    for i, keypointFile in enumerate(keypointFiles):
        with open(keypointFile) as data:
            keypoint = json.load(data)

            personIndex = 0
            if len(keypoint["people"]) > 1:
                personIndex = pose.getBiggestPersonIndex(keypoint)

            p = pose.Pose()
            posekp = pose.PoseKeypoint(i, p)
            # if no person was detected in this frame, keep the default empty pose
            if personIndex in range(0, len(keypoint["people"])):
                p.fromData(keypoint["people"][personIndex])
                posekp = pose.PoseKeypoint(i, p)

            keypoints.append(posekp)

    return keypoints
Code example #20
def main():
    """Execute the command"""
    parser = argparse.ArgumentParser(
        description='Run pose processing algorithms over a single image')
    parser.add_argument('-i',
                        '--id',
                        help='marker ID to find',
                        required=False,
                        type=int,
                        default=2)
    parser.add_argument('-f',
                        '--filename',
                        help='source image',
                        required=False,
                        default='images/raw_image_0.png')
    args = parser.parse_args()

    p = pose.Pose()
    frame = cv2.imread(args.filename)
    p.solve(frame, args.id)
    cv2.imshow('frame', p.display_results())
    cv2.waitKey(10000)
Code example #21
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-t",
                        action="store",
                        dest="t_p",
                        type=str,
                        help="Test path")
    parser.add_argument("-f",
                        action="store_true",
                        dest="f_d",
                        default=False,
                        help="Force detection")
    args = vars(parser.parse_args())
    test_path = args["t_p"]

    if not test_path:  # argparse leaves t_p as None when -t is omitted
        print("No test path given!")
        return

    kpt_file_path = os.path.join(test_path, "kpt.txt")
    kpt_file_exists = os.path.isfile(kpt_file_path)
    force_detection = args["f_d"]
    img_format = "jpeg"

    ut = util.Util(test_path, roi=300, image_format=img_format)
    if not kpt_file_exists or force_detection:
        ut.processImage()
        demo.PRNet(test_path)
        shutil.rmtree(os.path.join(test_path, "temp"))

    ut.readKPT()
    head_pose = pose.Pose(test_path)
    head_pose.regress(curve=3)
Code example #22
    def pose(self, index):
        return pose.Pose(self.orientations.get_rotation(index),
                         np.array(self.position_vector(index)))
Code example #23
    def initPose(self):
        self.myPose = pose.Pose()
        self.myPose.x = 0
        self.myPose.y = 0
        self.myPose.theta = 0
Code example #24
    def __construct_world(self):
        """Creates objects previously loaded from the world xml file.
           
           This function uses the world in ``self.__world``.
           
           All the objects will be created anew, including robots and supervisors.
           All of the user's code is reloaded.
        """
        if self.__world is None:
            return

        helpers.unload_user_modules()

        self.__state = DRAW_ONCE

        self.__robots = []
        self.__obstacles = []
        self.__supervisors = []
        self.__background = []
        self.__trackers = []
        self.__qtree = None

        for thing in self.__world:
            thing_type = thing[0]
            if thing_type == 'robot':
                robot_type, supervisor_type, robot_pose, robot_color = thing[1:5]
                try:
                    # Create robot
                    robot_class = helpers.load_by_name(robot_type, 'robots')
                    robot = robot_class(pose.Pose(robot_pose))
                    if robot_color is not None:
                        robot.set_color(robot_color)
                    elif len(self.__robots) < 8:
                        robot.set_color(self.__nice_colors[len(self.__robots)])

                    # Create supervisor
                    sup_class = helpers.load_by_name(supervisor_type,
                                                     'supervisors')

                    info = robot.get_info()
                    info.color = robot.get_color()
                    supervisor = sup_class(robot.get_pose(), info)
                    supervisor.set_logqueue(self.__log_queue)
                    name = "Robot {}: {}".format(
                        len(self.__robots) + 1, sup_class.__name__)
                    if self.__supervisor_param_cache is not None and \
                       len(self.__supervisor_param_cache) > len(self.__supervisors):
                        supervisor.set_parameters(
                            self.__supervisor_param_cache[len(
                                self.__supervisors)])
                    self._out_queue.put(
                        ("make_param_window",
                         (robot, name, supervisor.get_ui_description())))
                    self.__supervisors.append(supervisor)

                    # append robot after supervisor for the case of exceptions
                    self.__robots.append(robot)

                    # Create trackers
                    self.__trackers.append(
                        simobject.Path(robot.get_pose(), robot))
                    self.__trackers[-1].set_color(robot.get_color())
                except:
                    self.log(
                        "[Simulator.construct_world] Robot creation failed!")
                    raise
                    #raise Exception('[Simulator.construct_world] Unknown robot type!')
            elif thing_type == 'obstacle':
                obstacle_pose, obstacle_coords, obstacle_color = thing[1:4]
                if obstacle_color is None:
                    obstacle_color = 0xFF0000
                self.__obstacles.append(
                    simobject.Polygon(pose.Pose(obstacle_pose),
                                      obstacle_coords, obstacle_color))
            elif thing_type == 'marker':
                obj_pose, obj_coords, obj_color = thing[1:4]
                if obj_color is None:
                    obj_color = 0x00FF00
                self.__background.append(
                    simobject.Polygon(pose.Pose(obj_pose), obj_coords,
                                      obj_color))
            else:
                raise Exception(
                    '[Simulator.construct_world] Unknown object: ' +
                    str(thing_type))

        self.__time = 0.0
        if not self.__robots:
            raise Exception('[Simulator.construct_world] No robot specified!')
        else:
            self.__recalculate_default_zoom()
            if not self.__center_on_robot:
                self.focus_on_world()
            self.__supervisor_param_cache = None
            self.step_simulation()

        self._out_queue.put(('reset', ()))
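For orientation, the tuple layout that __construct_world unpacks can be reconstructed from the slicing above; a sketch of a self.__world list with illustrative values (the type-name strings and coordinates are hypothetical):

# Field order follows the unpacking in __construct_world:
world = [
    # ('robot', robot_type, supervisor_type, robot_pose, robot_color)
    ('robot', 'khepera3.Khepera3', 'week3.QBSupervisor', (1.0, 1.0, 0.0), None),
    # ('obstacle', obstacle_pose, obstacle_coords, obstacle_color)
    ('obstacle', (0.0, 0.0, 0.0), [(-0.1, -0.1), (0.1, -0.1), (0.1, 0.1)], 0xFF0000),
    # ('marker', obj_pose, obj_coords, obj_color); color None -> default 0x00FF00
    ('marker', (0.5, 0.5, 0.0), [(-0.05, 0.0), (0.05, 0.0), (0.0, 0.05)], None),
]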
Code example #25
    def initPose(self):
        self.leaderPose = pose.Pose()
        self.myPose = pose.Pose()
        self.desired = pose.Pose()
Code example #26
    def on_goal(self, goal):
        self.action_client.wait_for_server()
        action_goal = GoToPoseGoal()
        action_goal.pose.pose = goal.pose
        self.action_client.send_goal(action_goal)

    def get_angle_pose(self, quaternion_pose):
        q = [quaternion_pose.orientation.x,
             quaternion_pose.orientation.y,
             quaternion_pose.orientation.z,
             quaternion_pose.orientation.w]
        roll, pitch, yaw = euler_from_quaternion(q)

        angle_pose = pose.Pose()
        angle_pose.x = quaternion_pose.position.x
        angle_pose.y = quaternion_pose.position.y
        angle_pose.theta = yaw
        return angle_pose

if __name__ == '__main__':
    try:
        node = GoToGoalNode()
        node.main()
        goal = pose.Pose()
        goal.x = 5
        goal.y = 0
        goal.theta = 0
        #node.on_execute(goal)
    except rospy.ROSInterruptException:
        pass
Code example #27
    def sample(self, t):
        f = casadi.Function('f', [self.sym_traj.x],
                            [self.sym_traj.sample(t).asMX()])
        Rt = np.array(f.call([self.x_value])[0])
        return pose.Pose(Rt[:, :3], Rt[:, 3].reshape((3, 1)))
Code example #28
def main():
	#cap = cv2.VideoCapture('idaho.webm')
	#cap.set(cv2.CAP_PROP_POS_MSEC, 52000)
	# Game from https://www.youtube.com/watch?v=jOU3tmXgB8A
	#cap = cv2.VideoCapture('acerook.mp4')
	#cap.set(cv2.CAP_PROP_POS_MSEC, 7000)
	# Game from https://www.youtube.com/watch?v=aHZtDuUMK50
	#cap = cv2.VideoCapture('armin.mp4')
	#cap.set(cv2.CAP_PROP_POS_MSEC, 13000)
	# Game from https://www.youtube.com/watch?v=_N5sEyVc38o
	#cap = cv2.VideoCapture('Zaven Adriasian - Luke McShane, Italian game, Blitz chess.mp4')
	#cap.set(cv2.CAP_PROP_POS_MSEC, 105000)
	# Game from https://www.youtube.com/watch?v=fot9b08TuWc
	cap = cv2.VideoCapture('Carlsen-Karjakin, World Blitz Championship 2012.mp4')
	cap.set(cv2.CAP_PROP_POS_MSEC, 11000)

	ret, firstrgb = cap.read()
	#cv2.imshow(WINNAME, firstrgb)
	#cv2.waitKey(0)
	#while True:
	#	ret, firstrgb = cap.read()
	#	if firstrgb is None:
	#		return
	#	cv2.imshow(WINNAME, firstrgb)
	#	key = cv2.waitKey(1000 // 30) & 0xff
	#	if key == ord(' '):
	#		break

	# FIXME
	#corners = findboard.find_chessboard_corners(firstrgb)
	#projection = findboard.get_projection(corners, firstrgb.shape)
	projection = pose.Projection(cameraIntrinsics=pose.CameraIntrinsics(cameraMatrix=numpy.array([[1.60091387e+03, 0.00000000e+00, 6.39500000e+02], [0.00000000e+00, 1.60091387e+03, 3.59500000e+02], [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]]), distCoeffs=numpy.array([[0.], [0.], [0.], [0.], [0.]])), pose=pose.Pose(rvec=numpy.array([[ 1.68565304], [-1.07984874], [ 0.79226459]]), tvec=numpy.array([[ 2.72539522], [ 6.02566887], [26.47004221]])))

	#cap.set(cv2.CAP_PROP_POS_MSEC, 52000)
	#ret, firstrgb = cap.read()

	firstlab = cv2.cvtColor(firstrgb, cv2.COLOR_BGR2LAB)
	frame_size = tuple(reversed(firstlab.shape[:-1]))
	projection_shape = tuple(reversed(frame_size))

	piece_heatmaps = heatmaps.get_piece_heatmaps(frame_size, VOXEL_RESOLUTION, projection)
	depths = heatmaps.get_depths(projection)
	reference_heatmap = heatmaps.get_reference_heatmap(piece_heatmaps)
	reference_heatmap_numpy = numpy.tile(numpy.expand_dims(reference_heatmap.as_numpy(), axis=-1), (4,))
	#cv2.imshow(WINNAME, reference_heatmap.as_numpy() / reference_heatmap.delegate.max())
	#cv2.waitKey(0)
	first_board = chess.Board()

	white_pieces_board = chess.Board()
	white_pieces_board.set_piece_map({square: piece for (square, piece) in first_board.piece_map().items() if piece.color == chess.WHITE})
	black_pieces_board = chess.Board()
	black_pieces_board.set_piece_map({square: piece for (square, piece) in first_board.piece_map().items() if piece.color == chess.BLACK})
	# FIXME: Account for occlusion of pieces
	visible_white_pieces = heatmaps.get_board_heatmap(piece_heatmaps, white_pieces_board)
	visible_black_pieces = heatmaps.get_board_heatmap(piece_heatmaps, black_pieces_board)

	first_lightness = firstlab[:,:,0]
	white_average = numpy.average(first_lightness[visible_white_pieces.slice], weights=visible_white_pieces.delegate)
	black_average = numpy.average(first_lightness[visible_black_pieces.slice], weights=visible_black_pieces.delegate)

	if black_average > white_average:
		# Switch white and black
		projection = findboard.flip_sides(projection)
		piece_heatmaps = heatmaps.flip_piece_heatmaps(piece_heatmaps)
		depths = heatmaps.flip_depths(depths)
		(visible_white_pieces, visible_black_pieces) = (visible_black_pieces, visible_white_pieces)

	starting_pieces = heatmaps.get_board_heatmap(piece_heatmaps, first_board)
	#cv2.imshow(WINNAME, starting_pieces.as_numpy())
	#cv2.waitKey(0)
	light_squares = surface.get_light_square_heatmap(frame_size, projection)
	visible_light_squares = heatmaps.Heatmap.blend([light_squares, starting_pieces]).subtract(starting_pieces)
	#cv2.imshow(WINNAME, visible_light_squares.as_numpy())
	#cv2.waitKey(0)
	dark_squares = surface.get_dark_square_heatmap(frame_size, projection)
	visible_dark_squares = heatmaps.Heatmap.blend([dark_squares, starting_pieces]).subtract(starting_pieces)
	#cv2.imshow(WINNAME, visible_dark_squares.as_numpy())
	#cv2.waitKey(0)

	color_classifier = sklearn.naive_bayes.GaussianNB()
	class_heatmaps = [visible_light_squares, visible_dark_squares, visible_white_pieces, visible_black_pieces]
	class_pixels = [firstlab[class_heatmap.slice].reshape(-1, firstlab.shape[-1]) for class_heatmap in class_heatmaps]
	class_weights = [class_heatmap.delegate.ravel() for class_heatmap in class_heatmaps]
	class_labels = [numpy.full(class_weight.shape, class_idx) for (class_idx, class_weight) in enumerate(class_weights)]
	color_classifier.fit(
		numpy.vstack(class_pixels),
		numpy.hstack(class_labels),
		numpy.hstack(class_weights))

	first_classes = color_classifier.predict_proba(firstlab.reshape(-1, firstlab.shape[-1])).reshape(tuple(firstlab.shape[:2]) + (4,))
	#classified_composite = numpy.stack([
	#	first_classes[:,:,0],
	#	first_classes[:,:,1] + first_classes[:,:,2],
	#	first_classes[:,:,3] + first_classes[:,:,2],
	#], axis=2)
	#cv2.imshow(WINNAME, classified_composite)
	#cv2.waitKey(0)

	composite_memo = {(): numpy.stack([
		light_squares.as_numpy(),
		dark_squares.as_numpy(),
		heatmaps.Heatmap.blank(projection_shape).as_numpy(),
		heatmaps.Heatmap.blank(projection_shape).as_numpy(),
	], axis=-1)}
	first_move_diffs = heatmaps.get_move_diffs(piece_heatmaps, reference_heatmap, depths, composite_memo, first_board)
	#first_move_diffs = {chess.Move(chess.E2, chess.E4): first_move_diffs[chess.Move(chess.E2, chess.E4)]}

	first_weight = 1.
	first_particle = Particle(first_weight, first_board, first_classes, first_move_diffs)
	particles = {get_board_key(first_board): first_particle}
	threshold_weight = first_weight * MAX_WEIGHT_RATIO
	max_weight = first_weight

	history = collections.deque([firstlab] * HISTORY_LEN)
	while True:
		print('memory', len(particles), sum(len(particle.diffs) for particle in particles.values()), sum(move_diff.size for particle in particles.values() for move_diff in particle.diffs.values()))
		#pos = cap.get(cv2.CAP_PROP_POS_MSEC)
		#cap.set(cv2.CAP_PROP_POS_MSEC, pos + 100)

		ret, framergb = cap.read()
		if framergb is None:
			break
		cv2.imshow(WINNAME, framergb)
		cv2.waitKey(1)
		framelab = cv2.cvtColor(framergb, cv2.COLOR_BGR2LAB)

		history.appendleft(framelab)
		if len(history) > HISTORY_LEN:
			history.pop()

		stable_mask = subtractor.get_stable_mask(history)

		#display_mask = cv2.bitwise_and(framergb, stable_mask)
		#cv2.imshow(WINNAME, display_mask)
		#cv2.waitKey(1)

		# TODO: Only classify the relevant sections of the image
		frame_classes = color_classifier.predict_proba(framelab.reshape(-1, framelab.shape[-1])).reshape(tuple(framelab.shape[:-1]) + (4,))
		#classified_composite = numpy.stack([
		#	frame_classes[:,:,0],
		#	frame_classes[:,:,1] + frame_classes[:,:,2],
		#	frame_classes[:,:,3] + frame_classes[:,:,2],
		#], axis=2)
		#cv2.imshow(WINNAME, classified_composite)
		#cv2.waitKey(1)

		# FIXME: The particles are being updated inside this loop
		# Instead, create a separate collection for the next generation of particles
		for particle in list(particles.values()):
			if not particle.diffs:
				continue

			frame_diff = frame_classes - particle.stable_classes
			stable_diff = numpy.choose(stable_mask, [numpy.zeros_like(frame_diff), frame_diff], mode='clip')
			#stable_diff = frame_diff
			#stable_diff[stable_mask] = 0

			#cv2.imshow(WINNAME, stable_diff)
			#key = cv2.waitKey(1) & 0xff
			#advance_move = key == ord(' ')
			#print('*', key, '*', advance_move)

			#stable_diff_gray = subtractor.lab2mag(stable_diff)
			#stable_diff_heatmap = heatmaps.Heatmap(stable_diff_gray)
			#stable_diff_masked = heatmaps.Heatmap.product_zeros([reference_heatmap, stable_diff_heatmap])
			stable_diff_masked = stable_diff * reference_heatmap_numpy

			centered_subtractor = stable_diff_masked - numpy.expand_dims(reference_heatmap.reweight(stable_diff_masked.sum()).as_numpy(), axis=-1)

			#cv2.imshow(WINNAME, (centered_subtractor[:,:,2] - centered_subtractor[:,:,2].min()) / (centered_subtractor[:,:,2].max() - centered_subtractor[:,:,2].min()))
			#key = cv2.waitKey(0)

			# The Pearson correlation coefficient measures the goodness of fit
			scores = ((move, move_diff, (move_diff * centered_subtractor).sum())
				for (move, move_diff) in particle.diffs.items())
			# Each particle represents a hypothesis for which frame contains the next move
			# Particles don't need to represent multiple alternative moves associated with a single frame,
			# because the piece detection is reliable enough, given an accurate hypothesis about the timing
			best_move_item = max(scores, key=lambda item: item[2])
			(best_move, best_move_diff, best_score) = best_move_item

			subtractor_total_variance = (centered_subtractor**2).sum()
			if subtractor_total_variance:
				subtractor_denom = math.sqrt(subtractor_total_variance * centered_subtractor.size)
			else:
				subtractor_denom = centered_subtractor.size
			best_normalized_score = best_score / subtractor_denom

			#cv2.imshow(WINNAME, (best_move_diff[:,:,2] - best_move_diff[:,:,2].min()) / (best_move_diff[:,:,2].max() - best_move_diff[:,:,2].min()))
			#key = cv2.waitKey(0)

			weight = particle.weight * (best_normalized_score + 1 - EXPECTED_CORRELATION)
			if best_score < MIN_CORRELATION or weight < threshold_weight:
				#print('  rejected candidate', weight, best_move)
				continue

			#newstable_classes = cv2.bitwise_and(frame_classes, stable_mask)
			#invmask = cv2.bitwise_not(stable_mask)
			#hole_classes = cv2.bitwise_and(particle.stable_classes, invmask)
			#stable_classes = cv2.bitwise_or(hole_classes, newstable_classes)
			stable_classes = numpy.choose(stable_mask, [particle.stable_classes, frame_classes], mode='clip')

			next_board = particle.board.copy()
			next_board.push(best_move)
			next_particle_key = get_board_key(next_board)
			existing_particle = particles.get(next_particle_key)

			if existing_particle is None:
				next_move_diffs = heatmaps.get_move_diffs(piece_heatmaps, reference_heatmap, depths, composite_memo, next_board)
			elif existing_particle.weight < weight:
				next_move_diffs = existing_particle.diffs
			else:
				#print('  rejected candidate', weight, chess.Board().variation_san(next_board.move_stack))
				continue

			next_particle = Particle(weight, next_board, stable_classes, next_move_diffs)
			particles[next_particle_key] = next_particle
			#if advance_move:
			#	particles = dict({next_particle_key: next_particle})
			#	print('    accepted candidate', best_normalized_score, chess.Board().variation_san(next_board.move_stack))

			#if advance_move:
			if weight > max_weight:
				composite = numpy.zeros(framergb.shape, dtype=numpy.float32)
				composite[:,:,0] += best_move_diff[:,:,0] + best_move_diff[:,:,1]
				composite[:,:,1] += centered_subtractor[:,:,2] + centered_subtractor[:,:,3]
				composite[:,:,2] += best_move_diff[:,:,2] + best_move_diff[:,:,3]
				for channel in range(3):
					composite[:,:,channel] -= composite[:,:,channel].min()
					composite[:,:,channel] /= composite[:,:,channel].max()
				cv2.putText(
					composite,
					'SCORE: {:.3f}'.format(best_normalized_score),
					(10, 40),
					cv2.FONT_HERSHEY_SIMPLEX,
					1,
					(1, 1, 1),
				)
				cv2.putText(
					composite,
					chess.Board().variation_san(next_board.move_stack),
					(10, 80),
					cv2.FONT_HERSHEY_SIMPLEX,
					1,
					(1, 1, 1),
				)
				cv2.imshow(WINNAME, composite)
				cv2.waitKey(500)
			#advance_move = False

		# Resample by removing low-weighted particles
		max_weight = max(particle.weight for particle in particles.values())
		threshold_weight = max_weight * MAX_WEIGHT_RATIO
		#print('threshold_weight', threshold_weight)
		particles = {key: particle for (key, particle) in particles.items() if particle.weight >= threshold_weight}
		#print('composite_memo', len(composite_memo))
		print('particles:', len(particles), ', diffs:', sum(len(particle.diffs) for particle in particles.values()),
			#', unique diffs:', len(set(hashlib.sha224(numpy.ascontiguousarray(diff.as_numpy())).hexdigest() for particle in particles.values() for diff in particle.diffs.values())),
		)
		for particle in sorted(particles.values(), key=lambda particle: particle.weight, reverse=True):
			print('', particle.weight, chess.Board().variation_san(particle.board.move_stack))

	cap.release()
	cv2.destroyAllWindows()
Code example #29
def main():
    cap, ret = open_video(path)  # Open the video file
    ret, frame = get_frame(cap, ret)  # Get the frame in gray color

    feature_cache = feature.FeaturesCache()  # features cache keeping 4 features for initialization
    feature_cache_particle = {}  # per-particle features caches
    X_map_init = feature.XMapDictionary().X_map_dict  # feature map (X) for the initialization stage
    particles_filter = []  # particle filter variable
    motion_model = m_m.Motion_model()  # motion model object
    fastslam = fastSLAM.FastSLAM()  # SLAM object
    init_pose = pose.Pose(
        constants.init_euler_angles,  # initial pose values from constants.py
        constants.init_angular_rates,
        constants.init_coordinates)
    frame_number = 0

    # For each frame running the algorithm cycle
    while ret:
        """Initialization stage. It lasts for the 4 frames, in order to create measurements map object(X)."""
        frame_number += 1
        print frame_number
        # Skip over the first few frames, which the OpenCV SIFT estimation needs to initialize.
        if start_frame <= frame_number <= finish_of_init_frame:
            print "Initialization frame number:", frame_number
            if frame_number == start_frame:  # Initialization frame for creating all variables
                measurements_Z = Measurements_Z(
                    [])  # Initialize measurements Z object
                current_measurements = measurements_Z.init_measurement(
                    frame)  # Estimate and input SIFT measurements
                feature_cache.init_get_measurements(
                    current_measurements, X_map_init,
                    init_pose)  # pass the Pose instance, not the pose module
            else:  # Working until form first measurements map(X) object elements
                current_measurements = measurements_Z.make_measurement(frame)
                feature_cache.get_measurements(current_measurements,
                                               X_map_init, init_pose)

        # Creating the particles objects for the particle filter
        if frame_number == finish_of_init_frame:
            particles_filter = {}
            particle_weight = 1 / float(constants.NUMBER_OF_PARTICLES)
            for i in range(0, constants.NUMBER_OF_PARTICLES):
                particles_filter[i] = pose.Particle(particle_weight,
                                                    copy.deepcopy(init_pose),
                                                    copy.deepcopy(X_map_init),
                                                    copy.deepcopy(X_map_init))
                feature_cache_particle[i] = copy.deepcopy(
                    feature.FeaturesCache())
                feature_cache_particle[i].measurement_dict = copy.deepcopy(
                    feature_cache.measurement_dict)
                feature_cache_particle[i].times_observed = copy.deepcopy(
                    feature_cache.times_observed)
        """ Algorithm work main stage. """
        if frame_number > finish_of_init_frame:
            current_measurements = measurements_Z.make_measurement(frame)
            for i in particles_filter:
                feature_cache_particle[i].get_measurements(
                    current_measurements, particles_filter[i].X_map_dict,
                    particles_filter[i].pose)

                # Motion model update
                motion_model.rotational_motion_model(particles_filter[i].pose)
                motion_model.translational_optimization(
                    particles_filter[i], current_measurements)

                # ***Rao-Blackwellized particle filter update***
                # Time update stage
                fastslam.calc_position_proposal_distribution(
                    particles_filter[i], current_measurements)
                # Measurement update stage and particles weighting
                weight_sum = 0
                weight = fastslam.measurement_update(particles_filter[i],
                                                     current_measurements)
                weight_sum = weight_sum + weight

                # Check for resampling and resampling if condition true
                if fastslam.check_for_resampling(weight_sum):
                    particles_filter[
                        i].weight = 1.0 / constants.NUMBER_OF_PARTICLES
        ret, frame = get_frame(cap, ret)
    else:
        print "time", time.clock()
        print("Video`s end or camera is not working ")
Code example #30
    def __init__(self, segment_input, pose_update_input):
        self.segment_input = segment_input
        self.pose_update_input = pose_update_input
        self.curr_pose = pose.Pose(0, 0, 0, 0, 0, 0)