Example #1
File: camera.py Project: lvatne/fem_hons
    def __init__(self):
        props = sysprops.SysProps()
        self.cv2_cam_dev1 = props.cv2_cam_device1
        self.cv2_cam_dev2 = props.cv2_cam_device2
        self.imgdir = os.path.join(props.robohome, "img/weed")
        self.navdir = os.path.join(props.robohome, "img/navigation")

        self.logger = logging.getLogger('camera_img')

        try:
            robohome = os.environ['ROBOHOME']
        except KeyError:
            print("Error in installation. $ROBOHOME does not exist (motor_sw)")
            self.logger.error(
                "Error in installation. $ROBOHOME does not exist (motor_sw)")
            raise
        self.lights = lights.Lights()
        self.lights.headlights(False)
        self.light_wakeup_t = props.light_wakeup_t
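        # Send this module's log output to $ROBOHOME/log/camera_img.log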
        logdir = os.path.join(robohome, "log")
        hdlr = logging.FileHandler(os.path.join(logdir, "camera_img.log"))
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)
        self.logger.addHandler(hdlr)
        self.logger.setLevel(logging.WARNING)
        self.logger.warning("--------------+++--------------")
Example #2
    def run(self):
        """
        This method prints the signals to the console
        :return:
        """

        try:
            traffic_lights = lights.Lights()

            color_indicator = 1
            while not self.simulation_stop:
                if color_indicator == 4:
                    color_indicator = 1  # Resetting to 1
                if color_indicator == 1:  # That means red color
                    traffic_lights.red_on()
                    self.hold(self.red_seconds)
                    traffic_lights.red_off()
                elif color_indicator == 2:  # That means yellow color
                    traffic_lights.yellow_on()
                    self.hold(self.yellow_seconds)
                    traffic_lights.yellow_off()
                else:  # That means green color
                    traffic_lights.green_on()
                    self.hold(self.green_seconds)
                    traffic_lights.green_off()
                color_indicator += 1

            traffic_lights.close()
        except Exception:  # Mostly turtle exceptions here
            self.stop()
Example #3
    def __init__(self, app_log):
        """Initializes the Photo To Cloud System."""
        try:
            self.processing = False
            self.start_time = datetime.now()

            self.app_log = app_log
            self.app_log.info("Initialising PhotoToCloud ")

            #initiate the database
            self.settings_database = ptc_database.SettingsDatabase(
                self.app_log)
            self.sequence_step_database = ptc_database.SequenceStepsDatabase(
                self.app_log)
            self.led_groups_database = ptc_database.LEDGroupsDatabase(
                self.app_log)

            #Setup the IO
            self.app_log.info("setting up IO")
            GPIO.setwarnings(False)
            GPIO.setmode(GPIO.BCM)

            #########
            # self.buzzer = int(self.settings_database.get_setting('buzzer_gpio'))
            # self.switch = int(self.settings_database.get_setting('switch_gpio'))
            # self.app_log.info("Shutter GPIO = %s", self.switch)
            # self.shut_down = int(self.settings_database.get_setting('shutdown_gpio'))
            # self.status_light = int(self.settings_database.get_setting('status_light_index'))
            #########
            self.buzzer = 25
            # self.switch = 6
            # self.shut_down =
            GPIO.setup(self.buzzer, GPIO.OUT)
            # GPIO.setup(self.switch, GPIO.IN, pull_up_down=GPIO.PUD_UP)
            # GPIO.setup(self.shut_down, GPIO.IN, pull_up_down=GPIO.PUD_UP)
            #########

            # self.camera = ptc_camera.PTCCamera(self.app_log, self.sequence_step_database, self.settings_database)
            self.lights = lights.Lights(self.app_log,
                                        self.sequence_step_database,
                                        self.settings_database,
                                        self.led_groups_database)

            self.app_log.info("Init time is %d ms", self.millis())

        except Exception as exception:
            self.app_log.exception('Exception: %s', exception)
        finally:
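            # Reflect the initialisation result on the status light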
            success = True
            if not self.lights.initialised:
                success = False

            # if not self.camera.initialised:
            #     success = False

            if success:
                self.lights.set_status(lights.GREEN)
            else:
                self.lights.set_status(lights.RED)
Example #4
    def __init__(self, robot_name="", wait_services=False):

        self.robot_name = robot_name
        self.tf_listener = tf_server.TFClient()

        # Body parts
        self.parts = dict()
        self.parts['base'] = base.Base(self.robot_name, self.tf_listener)
        self.parts['torso'] = torso.Torso(self.robot_name, self.tf_listener)
        self.parts['leftArm'] = arms.Arm(self.robot_name,
                                         self.tf_listener,
                                         side="left")
        self.parts['rightArm'] = arms.Arm(self.robot_name,
                                          self.tf_listener,
                                          side="right")
        self.parts['head'] = head.Head(self.robot_name, self.tf_listener)

        # Human Robot Interaction
        self.parts['lights'] = lights.Lights(self.robot_name, self.tf_listener)
        self.parts['speech'] = speech.Speech(
            self.robot_name, self.tf_listener,
            lambda: self.lights.set_color(1, 0, 0),
            lambda: self.lights.set_color(0, 0, 1))
        self.parts['hmi'] = api.Api(self.robot_name, self.tf_listener)
        self.parts['ears'] = ears.Ears(self.robot_name,
                                       lambda: self.lights.set_color(0, 1, 0),
                                       lambda: self.lights.set_color(0, 0, 1))

        self.parts['ebutton'] = ebutton.EButton(self.robot_name,
                                                self.tf_listener)

        # Reasoning/world modeling
        self.parts['ed'] = world_model_ed.ED(self.robot_name, self.tf_listener)

        # Miscellaneous
        self.pub_target = rospy.Publisher("/target_location",
                                          geometry_msgs.msg.Pose2D,
                                          queue_size=10)
        self.base_link_frame = "/" + self.robot_name + "/base_link"

        # Grasp offsets
        #TODO: Don't hardcode, load from parameter server to make robot independent.
        self.grasp_offset = geometry_msgs.msg.Point(0.5, 0.2, 0.0)

        # Create attributes from dict
        for k, v in self.parts.iteritems():
            setattr(self, k, v)
        self.spindle = self.torso  # (ToDo: kind of ugly, why do we still have spindle???)
        self.arms = OrderedDict(
            left=self.leftArm, right=self.rightArm
        )  # (ToDo: kind of ugly, why do we need this???)
        self.ears._hmi = self.hmi  # ToDo: when ears is gone, remove this line

        # Wait for connections
        s = rospy.Time.now()
        for k, v in self.parts.iteritems():
            v.wait_for_connections(1.0)
        e = rospy.Time.now()
        rospy.logdebug("Connecting took {} seconds".format((e - s).to_sec()))
Example #5
    def test_part_two(self):
        "Test part two example of Lights object"

        # 1. Create Lights object from text
        myobj = lights.Lights(part2=True, text=aoc_10.from_text(PART_TWO_TEXT))

        # 2. Check the part two result
        self.assertEqual(myobj.part_two(verbose=False), PART_TWO_RESULT)
Example #6
    def test_part_one(self):
        "Test part one example of Lights object"

        # 1. Create Lights object from text
        myobj = lights.Lights(text=aoc_06.from_text(PART_ONE_TEXT))

        # 2. Check the part one result
        self.assertEqual(myobj.part_one(verbose=False), PART_ONE_RESULT)
Example #7
    def test_empty_init(self):
        "Test the default Lights creation"

        # 1. Create default Lights object
        myobj = lights.Lights()

        # 2. Make sure it has the default values
        self.assertEqual(myobj.part2, False)
        self.assertEqual(myobj.text, None)
        self.assertEqual(len(myobj.points), 0)
Example #8
def run_square():
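    # Drive a 0.5 m square: take pictures, move forward and turn 90 degrees, four times over.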
    try:
        l = lights.Lights()
        t = tracker.Tracker()
        cam = camera.Camera()
        c = t.c

        l.headlights(True)
        time.sleep(0.2)
        l.headlights(False)
    
        cam.take_picture()
        cam.take_nav_picture()
        # print("Heading %f" % (c.heading()))
        c.test_read()

        t.move_dist(0.5)
        time.sleep(1)
        t.turn_relative(90)
        time.sleep(0.5)
        # print("Heading %f" % (c.heading()))
        c.test_read()
    
        cam.take_picture()
        cam.take_nav_picture()
        t.move_dist(0.5)
        time.sleep(1)
        t.turn_relative(90)
        time.sleep(0.5)
        # print("Heading %f" % (c.heading()))
        c.test_read()
    
        cam.take_picture()
        cam.take_nav_picture()
        t.move_dist(0.5)
        time.sleep(1)
        t.turn_relative(90)
        time.sleep(0.5)
        # print("Heading %f" % (c.heading()))
        c.test_read()
    
        cam.take_picture()
        cam.take_nav_picture()
        t.move_dist(0.5)
        time.sleep(1)
        t.turn_relative(90)
        time.sleep(0.5)
        # print("Heading %f" % (c.heading()))
        c.test_read()
    
        l.headlights(False)
    except:
        print("Exception: ", sys.exc_info()[0])
        raise
Example #9
def part_two(args, input_lines):
    "Process part two of the puzzle"

    # 1. Create the puzzle solver
    solver = lights.Lights(part2=True, text=input_lines)

    # 2. Determine the solution for part two
    solution = solver.part_two(verbose=args.verbose, limit=args.limit)
    if solution is None:
        print("There is no solution")
    else:
        print("The solution for part two is %s" % (solution))

    # 3. Return result
    return solution is not None
Example #10
    def __init__(self):
        # indication led
        config = configparser.ConfigParser()
        config.read("../carty.ini")
        self.indication_led = int(config["MAIN"]["INDICATION_LED"])
        gpio.setmode(gpio.BCM)
        gpio.setup(self.indication_led, gpio.OUT)
        gpio.output(self.indication_led, gpio.LOW)

        # hardware modules
        self.motion = motion.Motion()
        self.lights = lights.Lights()

        # mode
        self.mode = Mode.realtime
        self.cached_commands = []
        self.command_time = 0
        self.prev_command = ""
Example #11
    def __init__(self):
        self.state = PhoneCtl.state_mute
        self.process = None
        self.lights = lights.Lights()
        self.phone = hardphone.HardPhone()
        self.udp_endpoint = (os.environ['NIGHTCALL_SINK_HOSTNAME'],
                             PhoneCtl.udp_port)
        # Non-blocking UDP socket bound to the local listen address
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.setblocking(False)
        self.sock.bind((os.environ['NIGHTCALL_LISTEN_IP'], PhoneCtl.udp_port))
        atexit.register(self.sock.close)
        self.remote_picked_up = False
        atexit.register(self.mute)
        self.last_me_ready = False
        self.light_toggle_timeout = None
        self.ring_stop_time = None
        self.ring_toggle_timeout = None
        self.ring_disabled_time = None
Example #12
    def test_text_init(self):
        "Test the Lights object creation from text"

        # 1. Create Lights object from text
        myobj = lights.Lights(text=aoc_06.from_text(EXAMPLE_TEXT))

        # 2. Make sure it has the expected values
        self.assertEqual(myobj.part2, False)
        self.assertEqual(len(myobj.text), 3)
        self.assertEqual(len(myobj.lights), 0)

        # 3. Check methods
        myobj.turn_on(0, 0, 999, 999)
        self.assertEqual(len(myobj.lights), 1000000)
        myobj.toggle(0, 0, 999, 0)
        self.assertEqual(len(myobj.lights), 1000000 - 1000)
        myobj.turn_off(499, 499, 500, 500)
        self.assertEqual(len(myobj.lights), 1000000 - (1000 + 4))
        self.assertEqual(myobj.count_lights(), 998996)
Example #13
    def __init__(self, robot_name="", wait_services=False):

        self.robot_name = robot_name

        self.tf_listener = tf_server.TFClient()

        # Body parts
        self.base = base.Base(self.robot_name,
                              self.tf_listener,
                              wait_service=wait_services)
        self.torso = torso.Torso(self.robot_name, wait_service=wait_services)
        self.spindle = self.torso
        self.leftArm = arms.Arm(self.robot_name, "left", self.tf_listener)
        self.rightArm = arms.Arm(self.robot_name, "right", self.tf_listener)
        self.arms = OrderedDict(left=self.leftArm, right=self.rightArm)

        self.head = head.Head(self.robot_name)

        # Human Robot Interaction
        self.speech = speech.Speech(self.robot_name,
                                    wait_service=wait_services)
        self.ears = ears.Ears(self.robot_name)
        self.ebutton = ebutton.EButton()
        self.lights = lights.Lights()

        # Perception: can we get rid of this???
        self.perception = perception_ed.PerceptionED(
            wait_service=wait_services)

        # Reasoning/world modeling
        self.ed = world_model_ed.ED(wait_service=wait_services)
        self.reasoner = reasoner.Reasoner()

        # Miscellaneous
        self.pub_target = rospy.Publisher("/target_location",
                                          geometry_msgs.msg.Pose2D,
                                          queue_size=10)
        self.base_link_frame = "/" + self.robot_name + "/base_link"

        #Grasp offsets
        #TODO: Don't hardcode, load from parameter server to make robot independent.
        self.grasp_offset = geometry_msgs.msg.Point(0.5, 0.2, 0.0)
Example #14
    def test_text_init(self):
        "Test the Lights object creation from text"

        # 1. Create Lights object from text
        myobj = lights.Lights(text=aoc_10.from_text(EXAMPLE_TEXT))

        # 2. Make sure it has the expected values
        self.assertEqual(myobj.part2, False)
        self.assertEqual(len(myobj.text), 31)
        self.assertEqual(len(myobj.points), 31)
        self.assertEqual(myobj.points[0].posX, 9)
        self.assertEqual(myobj.points[0].posY, 1)
        self.assertEqual(myobj.points[0].velX, 0)
        self.assertEqual(myobj.points[0].velY, 2)
        self.assertEqual(myobj.points[1].posX, 7)
        self.assertEqual(myobj.points[1].posY, 0)
        self.assertEqual(myobj.points[1].velX, -1)
        self.assertEqual(myobj.points[1].velY, 0)
        self.assertEqual(myobj.rows(), 16)

        # 3. Move forward a step
        myobj.step()
        self.assertEqual(myobj.points[0].posX, 9)
        self.assertEqual(myobj.points[0].posY, 3)
        self.assertEqual(myobj.points[0].velX, 0)
        self.assertEqual(myobj.points[0].velY, 2)
        self.assertEqual(myobj.points[1].posX, 6)
        self.assertEqual(myobj.points[1].posY, 0)
        self.assertEqual(myobj.points[1].velX, -1)
        self.assertEqual(myobj.points[1].velY, 0)
        self.assertEqual(myobj.rows(), 12)

        # 4. And a couple of more steps
        myobj.step()
        self.assertEqual(myobj.rows(), 10)
        myobj.step()
        self.assertEqual(myobj.rows(), 8)
        print(myobj.display())
        myobj.step()
        self.assertEqual(myobj.rows(), 11)
Example #15
File: jackson.py Project: tdicola/Jackson
    def __init__(self):
        self._microphone = microphone.Microphone()
        self._speech = speech.SpeechRecognizer(self._microphone)
        self._lights = lights.Lights()
        self._keywords = commands.Dispatcher()
        self._commands = commands.Dispatcher(junk=config.GRAMMAR_JUNK)
        self._happiness = 0  # Value that goes from -3 to 3
        self._brightness = 2  # Value that goes from 0 to 3
        self._hue = 0.0
        self._animations = collections.deque()
        self._push_animation(self._idle_animation())
        self._listen_animation = self._create_pulse_animation(80.0, 2.0)
        self._state_lock = threading.RLock()
        # Configure the keywords and their associated callbacks.
        for w in config.WAKE_WORDS:
            self._keywords.register(w, self._wake)
        for w in config.HAPPY_WORDS:
            self._keywords.register(w, self._increment_happiness, 1)
        for w in config.SAD_WORDS:
            self._keywords.register(w, self._increment_happiness, -1)
        # Configure the command parsing based on Jackson's grammar (from
        # commands.gram).  Right now this is all manual configuration and
        # care must be taken to ensure the logic below and commands.gram
        # are kept up to date.
        self._commands.register('wink', self._wink)
        self._commands.register('spectrum', self._spectrum)
        self._commands.register('sparkle', self._sparkle)
        self._commands.register('knight rider', self._knight_rider)
        self._commands.register('brighter', self._increment_brightness, 1)
        self._commands.register('dimmer', self._increment_brightness, -1)
        self._commands.register_starts_with('show me', self._change_animation)
        self._commands.register_starts_with('light up', self._change_color)
        self._commands.register_starts_with('change', self._change)
        self._commands.register_starts_with('set', self._change)
        self._commands.register_starts_with('update', self._change)
        self._commands.register_starts_with('modify', self._change)
        self._commands.register_starts_with('make', self._change)
Example #16
    def __init__(self):
        gtk.Window.__init__(self)
        self.connect('destroy', self.destroy_cb)

        self.config_window = None
        self.live_hide_timeout = 0
        self.light_hop_timeout = 0
        self.busy = False

        self.leds = ledmap.Ledmap(os.path.join(source_dir, 'data', 
                                               'led-maps.txt'))

        logging.debug('loaded %d maps', len(self.leds.get_names()))
        for name in self.leds.get_names():
            bytes = self.leds.get_bytes(name)
            logging.debug('%s: %d lights', name, len(bytes))

        # where project directories get written, see RTI cap above
        self.outdir = options.outdir

        self.lights = lights.Lights()

        # try to reset the lights ... if this fails, disable dome controls
        try:
            self.dome_controls = True
            name = self.leds.get_names()[0]
            self.lights.set_triple(self.leds.get_bytes(name)[0])
        except lights.Error as e:
            logging.debug('no lights found, disabling dome controls')
            self.dome_controls = False

        self.vbox = gtk.VBox(False, 0)
        self.add(self.vbox)
        self.vbox.show()

        fixed = gtk.Fixed()
        self.vbox.pack_start(fixed, False)
        fixed.show()

        self.camera = camera.Camera()
        self.preview = preview.Preview(self.camera)
        fixed.put(self.preview, 0, 0)
        self.preview.show()
        self.preview.connect('motion_notify_event', self.preview_motion_cb)

        if options.verbose:
            try:
                config = camera.Config(self.camera) 
                config.prettyprint(sys.stdout, config.get_root_widget())
            except:
                logging.debug("No Camera detected: unable to print config")
        eb = gtk.EventBox()
        fixed.put(eb, 0, 0)
        eb.show()

        self.progress = progress.Progress()
        self.progress.set_size_request(preview_width, -1)
        eb.add(self.progress)

        eb = gtk.EventBox()
        fixed.put(eb, 0, 0)
        eb.show()

        self.info = info.Info()
        self.info.set_size_request(preview_width, -1)
        eb.add(self.info)

        eb = gtk.EventBox()
        fixed.put(eb, 20, 380)
        eb.show()

        self.play_image = gtk.image_new_from_stock(gtk.STOCK_MEDIA_PLAY, 
                        gtk.ICON_SIZE_SMALL_TOOLBAR)
        self.pause_image = gtk.image_new_from_stock(gtk.STOCK_MEDIA_PAUSE, 
                        gtk.ICON_SIZE_SMALL_TOOLBAR)
        self.live = gtk.Button()
        self.live.set_image(self.play_image)
        self.live.set_tooltip_text("Start/stop live preview")
        self.live.connect('clicked', self.live_cb, None)
        eb.add(self.live)
        self.live.show()

        self.toolbar = gtk.HBox(False, 5)
        self.toolbar.set_border_width(3)
        self.vbox.pack_end(self.toolbar)
        self.toolbar.show()

        button = gtk.Button()
        quit_image = gtk.image_new_from_stock(gtk.STOCK_QUIT, 
                        gtk.ICON_SIZE_SMALL_TOOLBAR)
        quit_image.show()
        button.set_tooltip_text("Quit RTIAcquire")
        button.connect('clicked', self.destroy_cb, None)
        button.add(quit_image)
        self.toolbar.pack_end(button, False, False)
        button.show()

        if self.dome_controls:
            self.dome_picker = gtk.combo_box_new_text()
            for name in self.leds.get_names():
                self.dome_picker.append_text(name)
            self.dome_picker.set_active(0)
            self.dome_picker.set_tooltip_text("Select lighting system")
            self.dome_picker.connect('changed', self.dome_picker_cb, None)
            self.toolbar.pack_start(self.dome_picker, False, False)
            self.dome_picker.show()

            self.light_picker = gtk.SpinButton(climb_rate = 1)
            self.light_picker.set_numeric(True)
            self.light_picker.set_wrap(True)
            self.light_picker.set_increments(1, 1)
            self.light_picker.set_tooltip_text("Pick light")
            self.light_picker_refresh()
            self.light_picker.connect('value_changed', 
                    self.light_picker_cb, None)
            self.toolbar.pack_start(self.light_picker, False, False)
            self.light_picker.show()

        button = gtk.Button()
        menu_image = gtk.image_new_from_stock(gtk.STOCK_PREFERENCES, 
                        gtk.ICON_SIZE_SMALL_TOOLBAR)
        menu_image.show()
        button.set_tooltip_text("Camera settings")
        button.connect('clicked', self.config_cb, None)
        button.add(menu_image)
        self.toolbar.pack_start(button, False, False)
        button.show()

        button = gtk.Button('Focus')
        button.set_tooltip_text("Focus camera automatically")
        button.connect('clicked', self.focus_cb, None)
        self.toolbar.pack_start(button, False, False)
        button.show()

        photo_image = gtk.image_new_from_file(
                os.path.join(source_dir, 'data', 'camera_24.png'))
        photo = gtk.Button()
        photo.set_image(photo_image)
        photo.set_tooltip_text("Take single photo")
        photo.connect('clicked', self.photo_cb, None)
        self.toolbar.pack_start(photo, False, False)
        photo.show()

        if self.dome_controls:
            photo = gtk.Button('RTI Preview')
            photo.set_tooltip_text("Take preview RTI image")
            photo.connect('clicked', self.rti_preview_cb, None)
            self.toolbar.pack_start(photo, False, False)
            photo.show()

            photo = gtk.Button('RTI Capture ...')
            photo.set_tooltip_text("Start full RTI acquisition")
            photo.connect('clicked', self.rti_capture_cb, None)
            self.toolbar.pack_start(photo, False, False)
            photo.show()

        self.info.msg('Welcome to RTI Acquire', 'v1.3, March 2014')

        self.show()
Example #17
    def __init__(self, robot_name="", wait_services=False):

        self.robot_name = robot_name
        self.tf_listener = tf.TransformListener()

        # Body parts
        self.parts = dict()
        self.parts['base'] = base.Base(self.robot_name, self.tf_listener)
        self.parts['torso'] = torso.Torso(self.robot_name, self.tf_listener)
        self.parts['leftArm'] = arms.Arm(self.robot_name,
                                         self.tf_listener,
                                         side="left")
        self.parts['rightArm'] = arms.Arm(self.robot_name,
                                          self.tf_listener,
                                          side="right")
        self.parts['head'] = head.Head(self.robot_name, self.tf_listener)
        self.parts['perception'] = perception.Perception(
            self.robot_name, self.tf_listener)
        self.parts['ssl'] = ssl.SSL(self.robot_name, self.tf_listener)

        # Human Robot Interaction
        self.parts['lights'] = lights.Lights(self.robot_name, self.tf_listener)
        self.parts['speech'] = speech.Speech(
            self.robot_name, self.tf_listener,
            lambda: self.lights.set_color_colorRGBA(lights.SPEAKING),
            lambda: self.lights.set_color_colorRGBA(lights.RESET))
        self.parts['hmi'] = api.Api(
            self.robot_name, self.tf_listener,
            lambda: self.lights.set_color_colorRGBA(lights.LISTENING),
            lambda: self.lights.set_color_colorRGBA(lights.RESET))
        self.parts['ears'] = ears.Ears(
            self.robot_name, self.tf_listener,
            lambda: self.lights.set_color_colorRGBA(lights.LISTENING),
            lambda: self.lights.set_color_colorRGBA(lights.RESET))

        self.parts['ebutton'] = ebutton.EButton(self.robot_name,
                                                self.tf_listener)

        # Reasoning/world modeling
        self.parts['ed'] = world_model_ed.ED(self.robot_name, self.tf_listener)

        # Miscellaneous
        self.pub_target = rospy.Publisher("/target_location",
                                          geometry_msgs.msg.Pose2D,
                                          queue_size=10)
        self.base_link_frame = "/" + self.robot_name + "/base_link"

        # Check hardware status
        self._hardware_status_sub = rospy.Subscriber(
            "/" + self.robot_name + "/hardware_status", DiagnosticArray,
            self.handle_hardware_status)

        # Grasp offsets
        #TODO: Don't hardcode, load from parameter server to make robot independent.
        self.grasp_offset = geometry_msgs.msg.Point(0.5, 0.2, 0.0)

        # Create attributes from dict
        for partname, bodypart in self.parts.iteritems():
            setattr(self, partname, bodypart)
        self.arms = OrderedDict(
            left=self.leftArm, right=self.rightArm
        )  # (ToDo: kind of ugly, why do we need this???)
        self.ears._hmi = self.hmi  # ToDo: when ears is gone, remove this line

        # Wait for connections
        s = rospy.Time.now()
        for partname, bodypart in self.parts.iteritems():
            bodypart.wait_for_connections(1.0)
        e = rospy.Time.now()
        rospy.logdebug("Connecting took {} seconds".format((e - s).to_sec()))

        if not self.operational:
            not_operational_parts = [
                name for name, part in self.parts.iteritems()
                if not part.operational
            ]
            rospy.logwarn("Not all hardware operational: {parts}".format(
                parts=not_operational_parts))
Example #18
import lights, colors
import time

strip = lights.Lights(10)

while True:
    strip.fill(colors.Colors.RED)
    time.sleep(2)
    strip.fill(colors.ColorSets.RGB)
    time.sleep(2)
    strip.fill("#ffffff")
    time.sleep(2)

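# Note: this second loop is only reached if the endless loop above is removed or exits.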
while True:
    strip.breathe(colors.Colors.WHITE)
Example #19
#!/usr/bin/python

import numpy as np
import geofence
import tracker
import motor_sw
import lights
import time

t = tracker.Tracker()
g = t.g
l = lights.Lights()

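# Capture 500 gyro readings at roughly 100 Hz with the headlights on, then save them to a file.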
v = np.zeros((500, 3))
time.sleep(3)
l.headlights(True)
for i in range(500):
    v[i] = g.read()
    time.sleep(0.01)

l.headlights(False)
np.savetxt("gyro_data_180.txt", v)
Example #20
from multiprocessing import Process, Queue, Lock
import threading, time
import lights
import motor
import comms

lq = Queue()
mq = Queue()
sq = Queue()

lights = lights.Lights()
motor = motor.Motor()
comms = comms.Comms()

if __name__ == '__main__':
  print("Hello")

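  # Launch the comms, lights and motor controllers as separate processes that communicate over queues.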
  comms_process = Process(target=comms.start, args=( sq, ))
  comms_process.start()

  light_process = Process(target=lights.start, args=( lq, ))
  light_process.start()

  motor_process = Process(target=motor.startController, args=( mq, sq ))
  motor_process.start()

  done = False
  while (not done):
    time.sleep(1)
    command = raw_input("Lights or motor?")
    if command == "lights":