def test_can_open_or_close_servo_1(self):
    servo_hat = mock(name='servo_hat')
    servo1_pin = 6  # hat pin 6
    servo2_pin = 7  # hat pin 7
    grabber = Grabber(servo_hat, servo1_pin, servo2_pin)
    grabber.servo_1_open_or_close()
    servo_hat.setPWM.assert_called()
    servo_hat.dispose()
Example #2
    def init_grabber(self, camera):
        from Grabber import Grabber

        # initialize
        self.grabber = Grabber(camera['serial'], camera['name'], camera['width'],
                               camera['height'], camera['framerate'])

        # start
        self.grabber.start(depth=True)
def test_open_or_close_servo_2_flips_last_state(self):
    servo_hat = mock(name='servo_hat')
    grabber = Grabber(servo_hat, 6, 7, servo_min=400, servo_max=450)
    grabber.servo_2_last_state = 1  # just ensuring default for test
    # ensure starting value
    self.assertEqual(1, grabber.servo_2_last_state)
    # method under test
    grabber.servo_2_open_or_close()
    # prove the value was changed
    self.assertEqual(0, grabber.servo_2_last_state)
    servo_hat.dispose()
Example #4
    def __init__(self, config):
        # generic camera
        super(DirectCamera, self).__init__(config)

        # grabber from directly connected camera
        from Grabber import Grabber

        # initialize
        self.grabber = Grabber(self.serial, self.name, config['width'],
                               config['height'], config['framerate'])

        # start
        self.grabber.start(depth=True)
    def test_can_lift_up_ball(self):
        servo_hat = mock(name='servo_hat')

        lifter = ServoSettings(pin_number=8, min_setting=350, max_setting=375)
        grabber = Grabber(servo_hat,
                          6,
                          7,
                          lifter,
                          servo_min=400,
                          servo_max=450)
        grabber.lift_up_or_down()

        expected_calls = [
            call(8, 0, 350),
            call(8, 0, 355),
            call(8, 0, 360),
            call(8, 0, 365),
            call(8, 0, 370),
            call(8, 0, 375)
        ]

        servo_hat.setPWM.assert_has_calls(expected_calls, any_order=False)
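Read together with the expected calls, this test implies that lift_up_or_down sweeps the lifter pin from min_setting to max_setting in 5-unit steps. The real Grabber implementation is not shown in this listing; a minimal sketch consistent with the test, using assumed attribute names (servo_hat, lifter), might look like:

# Sketch only; LifterSketch is a hypothetical stand-in, not the real Grabber.
class LifterSketch(object):
    def __init__(self, servo_hat, lifter):
        self.servo_hat = servo_hat
        self.lifter = lifter  # a ServoSettings with pin_number, min_setting, max_setting

    def lift_up_or_down(self):
        # step the lifter servo from its minimum to its maximum PWM setting in steps of 5
        for setting in range(self.lifter.min_setting, self.lifter.max_setting + 1, 5):
            self.servo_hat.setPWM(self.lifter.pin_number, 0, setting)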
    def test_open_or_close_servo_1_called_executes_specific_pwm_calls(self):
        servo_hat = mock(name='servo_hat')
        grabber = Grabber(servo_hat, 6, 7, servo_min=400, servo_max=450)
        grabber.servo_1_last_state = 0
        grabber.servo_1_open_or_close()
        servo_hat.setPWM.assert_called()

        expected_calls = [
            call(6, 0, 445),
            call(6, 0, 440),
            call(6, 0, 435),
            call(6, 0, 430),
            call(6, 0, 425),
            call(6, 0, 420),
            call(6, 0, 415),
            call(6, 0, 410),
            call(6, 0, 405),
            call(6, 0, 400)
        ]

        servo_hat.setPWM.assert_has_calls(expected_calls, any_order=False)
        servo_hat.dispose()
    def test_open_or_close_servo_2_called_executes_specific_pwm_calls(self):
        servo_hat = mock(name='servo_hat')
        grabber = Grabber(servo_hat, 6, 7, servo_min=400, servo_max=450)
        grabber.servo_2_last_state = 1  # just ensuring default for test
        # method under test
        grabber.servo_2_open_or_close()

        expected_calls = [
            call(7, 0, 400),
            call(7, 0, 405),
            call(7, 0, 410),
            call(7, 0, 415),
            call(7, 0, 420),
            call(7, 0, 425),
            call(7, 0, 430),
            call(7, 0, 435),
            call(7, 0, 440),
            call(7, 0, 445),
            call(7, 0, 450)
        ]

        servo_hat.setPWM.assert_has_calls(expected_calls, any_order=False)
        servo_hat.dispose()
Example #8
def __init__(self, editor, uri):
    from Window import Window
    Window(editor)
    from StateTracker import Tracker
    Tracker(editor)
    from Grabber import Grabber
    Grabber(editor)
    from FullscreenManager import Manager
    Manager(editor)
    from Positioner import Positioner
    Positioner(editor, uri)
    from PositionUpdater import Updater as PositionUpdater
    PositionUpdater(editor, uri)
    from TitleUpdater import Updater as TitleUpdater
    TitleUpdater(editor, uri)
Example #9
class DirectCamera(Camera):
    def __init__(self, config):
        # generic camera
        super(DirectCamera, self).__init__(config)

        # grabber from directly connected camera
        from Grabber import Grabber

        # initialize
        self.grabber = Grabber(self.serial, self.name, config['width'],
                               config['height'], config['framerate'])

        # start
        self.grabber.start(depth=True)

    def grab(self):
        # get new image
        image = self.grabber.grab()

        # hold in internal structures
        if 'depth' in image.keys():
            self.depth_image = image['depth']
            self.pointcloud = image['pointcloud']
        if 'color' in image.keys(): self.color_image = image['color']
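The branches above assume grabber.grab() returns a dict keyed by stream name. Something shaped like the following would satisfy both branches; the keys come from the code, while the array shapes and dtypes are placeholder assumptions:

import numpy as np

# Illustrative only: placeholder arrays standing in for real frames.
image = {
    'depth': np.zeros((480, 640), dtype=np.uint16),
    'pointcloud': np.zeros((480, 640, 3), dtype=np.float32),
    'color': np.zeros((480, 640, 3), dtype=np.uint8),
}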
Example #10
    def _init(self):
        self._init_file_stores()
        self._init_linesplit()
        self._init_gelatin()
        self._init_xsltproc()
        self._init_invoke_module()

        grabber_elem = self.cfgtree.find('grabber')
        seeddb_name = grabber_elem.find('seeddb').text
        seeddb = get_seeddb_from_name(self, seeddb_name)
        providers = []
        for provider_elem in grabber_elem.iterfind('provider'):
            provider_name = provider_elem.text
            provider = self._init_provider_from_name(provider_name)
            cond = provider_elem.get('if')
            if cond is not None:
                provider.set_condition(cond)
            providers.append(provider)

        self.grabber = Grabber(seeddb, self.processors, self.stores, providers)
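For orientation, the ElementTree lookups above (find('grabber'), find('seeddb'), iterfind('provider'), the optional 'if' attribute) imply a configuration fragment shaped roughly like the one below. Only the element and attribute names are taken from the code; the values are placeholders.

import xml.etree.ElementTree as ET

# Placeholder config fragment; values are illustrative, not from the source.
cfgtree = ET.fromstring("""
<config>
  <grabber>
    <seeddb>example-seeddb</seeddb>
    <provider>example-provider-a</provider>
    <provider if="some-flag">example-provider-b</provider>
  </grabber>
</config>
""")

grabber_elem = cfgtree.find('grabber')
print(grabber_elem.find('seeddb').text)                    # example-seeddb
print([p.text for p in grabber_elem.iterfind('provider')])  # both providers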
Example #11
class ObstacleDetector(object):
    """ Base class for vision-based object detection """

    def __init__(self, camera):

        # use function to allow overloading of image grabber (e.g. via ROS)
        self.init_grabber(camera)

        # subscribe to segmented rgb image topic
        rospy.Subscriber("/{}/segnet/class_mask".format(camera['name']), Image, self.segnet_callback)

        # initialize images

        self.color_image, self.depth_image = None, None

        # to hold segmentation image from jetson inference
        self.segmentation_image = None

        # initialize obstacle reporting structures
        self.obstacle_list, self.obstacle_mask = None, None

        # initialize ros2opencv converter
        self.cv_bridge = CvBridge()

        # rotation matrix to apply on result
        self.rotation = np.array(camera['R'])

        rospy.loginfo('started camera {}'.format(camera['serial']))

    def init_grabber(self, camera):
        from Grabber import Grabber

        # initialize
        self.grabber = Grabber(camera['serial'], camera['name'], camera['width'],
                               camera['height'], camera['framerate'])

        # start
        self.grabber.start(depth=True)

    def grab(self):
        # grab
        image = self.grabber.grab()

        # hold in internal structures
        if 'depth' in image.keys(): self.depth_image = image['depth']
        if 'color' in image.keys(): self.color_image = image['color']

        cv2.imshow(str(self.grabber.cam_serial), 10*self.depth_image)
        cv2.waitKey(1)

    def process_depth(self):

        # default to no detections
        self.obstacle_list, self.obstacle_mask = [], []

        if self.depth_image is None:
            return
            
        # detect obstacles based on depth
        # self.obstacle_list, self.obstacle_mask = segment_depth(self.depth_image)
        self.obstacle_list, self.obstacle_mask = cluster_depth(self.depth_image)

        for detection in self.obstacle_list:
            # prepare vector for multiplication by the rotation matrix
            xy = [[detection['x']], [detection['y']]]

            # calculate with camera pose
            loc = np.matmul(self.rotation, xy)                                               

            # update results
            detection['x'], detection['y'] = loc[0], loc[1]

    def segnet_callback(self, data):
        # convert the incoming ROS Image message to an OpenCV image
        self.segmentation_image = self.cv_bridge.imgmsg_to_cv2(data)

        # use segnet output
        self.label_detections()

    def label_detections(self):
        # TODO: a more elegant way than a global variable
        global labels

        # for each detection
        for curr_obstacle, mask in zip(self.obstacle_list, self.obstacle_mask):
            # mask segmentation image
            segmented_mask = cv2.bitwise_and(self.segmentation_image, mask)

            # find most common element
            most_common_class = mode(segmented_mask.flatten())

            # this will be the classification of the obstacle
            curr_obstacle['class'] = labels[most_common_class]
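For completeness, a hypothetical driver for this class might look like the sketch below. The node name, camera values, and loop structure are assumptions, not taken from the source.

import rospy

# Hypothetical driver only; serial number, sizes and rotation are placeholders.
if __name__ == '__main__':
    rospy.init_node('obstacle_detector')

    camera = {'serial': '000000000000', 'name': 'front_camera',
              'width': 640, 'height': 480, 'framerate': 30,
              'R': [[1.0, 0.0], [0.0, 1.0]]}  # identity pose, placeholder

    detector = ObstacleDetector(camera)
    rate = rospy.Rate(camera['framerate'])
    while not rospy.is_shutdown():
        detector.grab()
        detector.process_depth()
        rate.sleep()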
    striker = StrikerCommands(gpio,
                              striker_pin,
                              striker_reverse_pin,
                              servo_hat,
                              wedge_motor,
                              show_hide_striker_pin,
                              rotate_min=100,
                              rotate_max=305)

if has_grabber:
    lift = ServoSettings(gripper_1_lifter_pin, 440, 475)

    grabber = Grabber(servo_hat,
                      gripper_1_pin,
                      None,
                      lifter=lift,
                      servo_min=130,
                      servo_max=200)

if has_ball_sensor and has_wall_sensor and has_wheels and has_striker:
    brains = Brains(ball_sensor, wall_sensor, wheels, striker)
    brains.is_wall_sensitivity = 25

if has_tank_turret:
    turret = ServoBasics(servo_hat)

# Keyboard stuff
import Tkinter as tk