def dock(self):
    Vr_to_t = self.search()
    # angle_g_to_t = math.atan2((self.goal[1] - self.search()[1]), (self.goal[0] - self.search()[0]))
    # angle_r_to_t = math.atan2((self.X[1] - self.search()[1]), (self.X[0] - self.search()[0]))
    # theta = angle_r_to_t - angle_g_to_t
    box_location = BoxLocation(x=self.box_pos[0], y=self.box_pos[1])
    goal = Point(x=self.goal[0], y=self.goal[1])
    robot_location = RobotLocation(x=self.X[0], y=self.X[1])
    angle_g_to_t = Motion.angle_between_points(box_location, goal)
    angle_r_to_t = Motion.angle_between_points(box_location, robot_location)
    theta = Motion.delta_between_angles(angle_g_to_t, angle_r_to_t)
    if theta < 0:
        Vperp = np.array([-Vr_to_t[1], Vr_to_t[0], Vr_to_t[2]])  # turn left?
        theta = math.pi + theta
    else:
        Vperp = np.array([Vr_to_t[1], -Vr_to_t[0], Vr_to_t[2]])  # turn right?
        theta = math.pi - theta
    alpha = theta / THETA_MAX
    if theta > THETA_MAX:
        # print 'v perp', Vperp
        return Vperp
    else:
        # print 'v perp + ...', alpha*Vperp + (1-alpha)*Vr_to_t
        return alpha * Vperp + (1 - alpha) * Vr_to_t

def run(self: Detector) -> None:
    self.camera.start()
    try:
        framecount = 0
        motion = Motion()
        for image in self.camera.yield_image():
            framecount += 1
            if framecount >= self.fastforward:
                start_time = time.perf_counter()
                bboxes = motion.detect(image)
                objects = self.predictor.predict(image, bboxes)
                end_time = time.perf_counter()
                elapsed_ms = (end_time - start_time) * 1000
                self.camera.clear()
                self.camera.draw_objects(objects)
                self.camera.draw_time(elapsed_ms)
                self.camera.draw_count(len(objects))
                self.camera.update()
                framecount = 0
            self.before = image
    except KeyboardInterrupt:
        pass
    finally:
        self.camera.stop()
    return

def main():
    args = _parse_args()
    logging.basicConfig(level=logging.DEBUG)
    team_score = TeamScore(args.team, args.json)
    audiovisual = Audiovisual()
    motion = Motion()
    while True:
        if motion.presence():
            if team_score.new_goal():
                audiovisual.set_melody(Audiovisual.MELODY)
                time.sleep(OFF_TIME)
                audiovisual.set_melody(Audiovisual.SILENCE)
            team_result = team_score.result()
            if team_result == TeamScore.RESULT_WIN:
                audiovisual.set_color(Audiovisual.GREEN)
            elif team_result == TeamScore.RESULT_DRAW:
                audiovisual.set_color(Audiovisual.YELLOW)
            else:
                audiovisual.set_color(Audiovisual.RED)
        else:
            audiovisual.set_color(Audiovisual.WHITE)
        time.sleep(args.time - OFF_TIME)

def __init__(self, infile=0):
    self.cam = cv2.VideoCapture(infile)
    self.mot = Motion()
    self.stopped = False
    Thread(target=self.run, args=()).start()

def __init__(self):
    super().__init__()
    # declare the node name
    rospy.init_node('OP3Env', anonymous=True)
    self.rate = rospy.Rate(RATE)
    action = np.array([
        -1.5, 1.5, -1.7, 0.00, 1.7, 0.00,
        0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
        0.00, 0.00, 0.00, 0.00, 0.00, 0.00
    ])
    # OP3Env.state = np.array([0, 0, 1.8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    OP3Env.state = np.zeros(NUM_STATE)
    # update the state via subscriber callbacks
    sub1 = rospy.Subscriber('gazebo/model_states', ModelStates, callback_get_state)
    sub2 = rospy.Subscriber('robotis_op3/joint_states', JointState, callback_get_joint_state)
    reward = 0
    self.action_space = gym.spaces.Discrete(NUM_ACTION)
    self.observation_space = gym.spaces.Box(low=-np.pi, high=np.pi, shape=(NUM_STATE,))
    self.Com = Motion(action)
    self.reset()

def angle_to_point_behind_box(self):
    angle_g_to_t = Motion.angle_between_points(self.boxLocation, self.target)
    angle_r_to_t = Motion.angle_between_points(self.boxLocation, self.robotLocation)
    theta = Motion.delta_between_angles(angle_g_to_t, angle_r_to_t)
    theta = Motion.fix_angle_180(theta)
    return theta

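# A minimal sketch (an assumption, not code from any snippet above) of what the Motion
# angle helpers used here might look like, based on the commented-out atan2 lines in the
# dock() snippet: angle_between_points(a, b) is the heading of the vector a -> b,
# delta_between_angles is the signed difference, and fix_angle_180 wraps into [-pi, pi].
import math


def angle_between_points(p1, p2):
    # heading of the vector from p1 to p2, in radians
    return math.atan2(p2.y - p1.y, p2.x - p1.x)


def delta_between_angles(angle_a, angle_b):
    # signed difference angle_b - angle_a, wrapped into [-pi, pi]
    return fix_angle_180(angle_b - angle_a)


def fix_angle_180(angle):
    # wrap any angle into the range [-pi, pi]
    return math.atan2(math.sin(angle), math.cos(angle))


if __name__ == "__main__":
    from collections import namedtuple
    Pt = namedtuple("Pt", "x y")
    print(angle_between_points(Pt(0, 0), Pt(1, 1)))  # ~0.785 rad (45 degrees)
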
def __init__(self):
    self.kb = KnowledgeBase()
    self.location = BarcodeServer(self.location_callback)
    self.flight_path = FlightPath()
    self.motion = Motion(self.kb)
    self.node_a = NodeAServer()

class Controller(object):

    def __init__(self):
        self.kb = KnowledgeBase()
        self.location = BarcodeServer(self.location_callback)
        self.flight_path = FlightPath()
        self.motion = Motion(self.kb)
        self.node_a = NodeAServer()

    def _init_fp(self):
        self.flight_path.build_full_graph()

    def location_callback(self, position):
        """
        Location callback.

        This function is used by the threaded location module to update the
        current location for the controller.

        Parameters
        ----------
        position : str
            String representation of six digit number indicating current position.
        """
        self.kb.current_location = position
        # print("Position:", self.kb.current_location)

    def run(self):
        self.location.start()
        self.node_a.start()
        path = [100001, 100002, 100003, 100004, 100005, 100006, 100007]
        for p in path:
            self.motion.move_until_location(p)
            if p == 100001:
                tic = time.time()
            else:
                toc = time.time()
                self.kb.duration.append(toc - tic)
                tic = time.time()
        print("kb.duration:", self.kb.duration)

    def shutdown(self):
        self.motion.shutdown()
        self.node_a.join()

def load_modules(self):
    self._speech = Speech(self.session)
    self._motion = Motion(self.session)
    self._tablet = Tablet(self.session)
    self._face_detection = FaceDetection(self.session, self)
    self._wave_detection = WavingDetection(self.session)
    self._audio_player = AudioPlayer(self.session)
    self._speech_recognition = SpeechRecognition(self.session)
    self._system = System(self.session)

def __init__(self, safety_level=0):
    Motion.__init__(self)
    self._logger = Logger("SafeMotion")
    # set up sensor module
    self.sensors = Sensors()
    # set avoidance state
    self.avoiding = False
    # set motion modification based on safety level
    self._motionModifier = self._avoid if safety_level > 0 else self._safetyStop

def run(self):
    motion = Motion()
    try:
        self._sift = MatchWithSIFT()
        self._sift.load_refs()
    except (AttributeError, cv2.error):
        print("SIFT not available! Won't parse dice!")
    _run = False
    _stop = False
    _sample = False
    with self.conlock:
        _run = self._apprun
        _stop = self._appstop
        _sample = self._sample
    while _run:
        self._cam.start()
        while self._cam.ready():
            with self.conlock:
                self.conlock.wait_for(lambda: (self._appstop or self._sample))
                _sample = self._sample
                _stop = self._appstop
            if _stop:
                break
            if not _sample:
                continue
            motion.roll()
            frame = None
            while frame is None:
                frame = self._cam.get_frame()
            dice, frame = self._process_image(frame)
            with self.conlock:
                self._sample = False
            with self.reslock:
                self.dice = dice
                self.frame = frame
                self.fresh = True
                self.reslock.notify()
        self._cam.stop()
        with self.conlock:
            _run = self._apprun

def mappingMode():
    try:
        motionVals = Motion()
        map_all_inputs(possibleInputs)  # mapping all the inputs
        while True:
            movementVals = motionVals.get_motion()
            # every second, check for the current input among all the possible inputs
            printInput = check_input(movementVals, possibleInputs)
            print(printInput)
            movementVals = []  # reset current motion val
            time.sleep(1.0)  # sleep for 1 second
    except KeyboardInterrupt:
        sys.exit()

def move(self):
    if Motion.dis_between_points(self.boxLocation, self.goal) < 0.5:
        print('Goooooooal reached!!!!!!!!!!!!!')
        self.v = 0.
        self.omega = 0.
        return
    if Motion.dis_between_points(self.boxLocation, self.target) < .5:
        self.target = Point(self.wayPoints[self.targetIndex][0], self.wayPoints[self.targetIndex][1])
        self.targetIndex += 1
    if Motion.is_box_in_front_of_robot(self.robotLocation, self.boxLocation, self.target, behind_x=4.5):
        # print('Pushing!')
        self.v, self.omega, run_time1, run_time2 = Motion.go_to_point(self.robotLocation, self.push())
        return
    self.v, self.omega, run_time1, run_time2 = Motion.go_to_point(self.robotLocation, self.acquire())

class Capture:

    def __init__(self, infile=0):
        self.cam = cv2.VideoCapture(infile)
        self.mot = Motion()
        self.stopped = False
        Thread(target=self.run, args=()).start()

    def run(self):
        while not self.stopped:
            if self.cam.isOpened():
                ret, img = self.cam.read()
                if ret:
                    # time.sleep(0.05)
                    img = imutils.resize(img, width=800)
                    img = imutils.rotate(img, angle=90)
                    Store.current_frame += 1
                    if self.mot.has_movement(img):
                        Store.queue_frame(img, Store.current_frame)
        self.stop()

    def stop(self):
        self.stopped = True
        self.cam.release()

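# A minimal, self-contained sketch (an assumption, not code from the Capture class above)
# of how a frame-difference test like Motion.has_movement() could be implemented with
# OpenCV: keep the previous grayscale frame, threshold the absolute difference, and
# report movement when enough pixels changed.
import cv2
import numpy as np


class FrameDiffMotion:
    def __init__(self, pixel_threshold=25, ratio_threshold=0.01):
        self.previous = None
        self.pixel_threshold = pixel_threshold  # per-pixel intensity change to count
        self.ratio_threshold = ratio_threshold  # fraction of changed pixels to call it movement

    def has_movement(self, frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        if self.previous is None:
            self.previous = gray
            return False
        diff = cv2.absdiff(self.previous, gray)
        changed = cv2.threshold(diff, self.pixel_threshold, 255, cv2.THRESH_BINARY)[1]
        self.previous = gray
        ratio = np.count_nonzero(changed) / changed.size
        return ratio > self.ratio_threshold
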
def run_server():
    global bot
    global cam
    global motion
    global audio
    try:
        try:
            app.bot_config = Config.read()
            bot = CoderBot.get_instance(
                servo=(app.bot_config.get("move_motor_mode") == "servo"),
                motor_trim_factor=float(app.bot_config.get("move_motor_trim", 1.0)),
            )
            audio = Audio.get_instance()
            audio.say(app.bot_config.get("sound_start"))
            try:
                cam = Camera.get_instance()
                motion = Motion.get_instance()
            except picamera.exc.PiCameraError:
                logging.error("Camera not present")
            if app.bot_config.get("load_at_start") and len(app.bot_config.get("load_at_start")):
                app.prog = app.prog_engine.load(app.bot_config.get("load_at_start"))
                app.prog.execute()
        except ValueError as e:
            app.bot_config = {}
            logging.error(e)
        bot.set_callback(PIN_PUSHBUTTON, button_pushed, 100)
        app.run(host="0.0.0.0", port=8080, debug=True, use_reloader=False, threaded=True)
    finally:
        if cam:
            cam.exit()
        if bot:
            bot.exit()
        app.shutdown_requested = True

def __init__(self, dir, color_dir, semantic_dir, audio_dir, motion_dir):
    color = Color(dir, color_dir, 352, 288, color_space="RGB", mode="average", bins=32)
    color.load()
    resnet = ResNet(dir, semantic_dir, 352, 288)
    resnet.load()
    audio = Audio(dir, audio_dir, mode="max")
    audio.load()
    motion = Motion(dir, motion_dir)
    motion.load()
    self.audio = audio
    self.resnet = resnet
    self.color = color
    self.motion = motion

def __init__(self):
    # Initialize ROS node
    rospy.init_node("Controller")
    self.name = "Controller object"
    self.rate = rospy.Rate(10)  # allow node to 'spin' at 10hz
    rospy.on_shutdown(self.shutdown)  # calls this function when Ctrl+C
    self.motion = Motion()
    self.sensor = Sensor()
    self.localizer = Localizer()
    self.visited_highlights = []
    # Boolean for reached goal
    self.goalReached = False

def __init__(self, jerky=False, walking_speed=1):
    # listen for frame transformations
    TfTransformer.__init__(self)

    # initialize motion component of navigation
    self._motion = Motion()
    self._sensors = Sensors()
    self._jerky = jerky
    self._walking_speed = min(abs(walking_speed), 1)
    self._logger = Logger("Navigation")

    # set up obstacle avoidance
    self._avoiding = False
    self._obstacle = False
    self._bumped = False
    self._bumper = 0

    # we're going to send the turtlebot to a point a quarter meter ahead of itself
    self._avoid_goto = PointStamped()
    self._avoid_goto.header.frame_id = "/base_footprint"
    self._avoid_goto.point.x = self._AVOID_DIST
    self._avoid_target = None
    self._avoid_turn = None

    # subscribe to the robot_pose_ekf odometry information
    self.p = None
    self.q = None
    self.angle = 0
    self._target_turn_delta = 0
    rospy.Subscriber('/robot_pose_ekf/odom_combined', PoseWithCovarianceStamped, self._ekfCallback)

    # set up navigation to destination data
    self._reached_goal = True

    # set up the odometry reset publisher (publishing Empty messages here will reset odom)
    reset_odom = rospy.Publisher('/mobile_base/commands/reset_odometry', Empty, queue_size=1)

    # reset odometry (these messages take about a second to get through)
    timer = time()
    while time() - timer < 1 or self.p is None:
        reset_odom.publish(Empty())

def run_server():
    bot = None
    cam = None
    try:
        try:
            app.bot_config = Config.read()
            bot = CoderBot.get_instance(
                servo=(app.bot_config.get("move_motor_mode") == "servo"),
                motor_trim_factor=float(app.bot_config.get('move_motor_trim', 1.0)))
            audio = Audio.get_instance()
            audio.say(app.bot_config.get("sound_start"))
            try:
                cam = Camera.get_instance()
                Motion.get_instance()
            except picamera.exc.PiCameraError:
                logging.error("Camera not present")
            CNNManager.get_instance()
            EventManager.get_instance("coderbot")
            if app.bot_config.get('load_at_start') and app.bot_config.get('load_at_start'):
                app.prog = app.prog_engine.load(app.bot_config.get('load_at_start'))
                app.prog.execute()
        except ValueError as e:
            app.bot_config = {}
            logging.error(e)
        bot.set_callback(PIN_PUSHBUTTON, button_pushed, 100)
        remove_doreset_file()
        app.run(host="0.0.0.0", port=5000, debug=True, use_reloader=False, threaded=True)
    finally:
        if cam:
            cam.exit()
        if bot:
            bot.exit()
        app.shutdown_requested = True

def motion_window(self):
    from motion import Motion
    self.motion_window = tk.Toplevel(self.master)
    main_window.withdraw()
    new_window = Window()
    new_window.center_window(self.motion_window, 675, 590)
    self.app = Motion(self.motion_window)
    self.motion_window.protocol(
        "WM_DELETE_WINDOW",
        lambda: self.iconify_main_panel(self.motion_window))

def __init__(self, pheromone_map, pos=(0, 0), size=200):
    self.pos = pos
    self.pheromone_map = pheromone_map
    self.ants = []
    for i in range(size):
        ant = Ant(colony_pos=pos, direction=random.choice(Direction.to_list()))
        ant.motion = Motion(pheromone_map=pheromone_map,
                            ant=ant,
                            direction_distr=[0.7, 0.2, 0.1],
                            pheromone_importance=0.9)
        self.ants.append(ant)

def move(self):
    if self.distance(self.box_pos, self.goal) < 1.5:
        print('Goal reached!!!!!!!!!!!!!')
        self.v = 0.
        self.omega = 0.
        return
    euler = tf.transformations.euler_from_quaternion(self.R)
    if Motion.is_box_in_front_of_robot(RobotLocation(x=self.X[0], y=self.X[1], theta=euler[2]),
                                       BoxLocation(x=self.box_pos[0], y=self.box_pos[1]),
                                       goal=Point(x=self.goal[0], y=self.goal[1]),
                                       behind_x=4.5):
        print('Pushing!!!!!!!!!!!!!')
        self.v, self.omega = motion.update_motion(self.X, self.R, self.push())
        return
    self.v, self.omega = motion.update_motion(self.X, self.R, self.acquire())

def __init__(self):
    self.motionDetected = False
    self.relay = Relays(7)
    self.relay.setAllOff()
    self.motion = Motion(0, 3, self)
    self.light = Light(3, 1000, self)
    self.temp = Temp(1, 85, self)
    self.start = time.time()
    while True:
        print('********** Switch Module Starting **********')
        time.sleep(5)

def dock(self):
    Vr_to_t = self.search() - self.get_robot_location()
    theta = self.angle_to_point_behind_box()
    if theta < 0:
        Vperp = np.array([-Vr_to_t[1], Vr_to_t[0]])  # turn left?
        theta = math.pi + theta
    else:
        Vperp = np.array([Vr_to_t[1], -Vr_to_t[0]])  # turn right?
        theta = math.pi - theta
    alpha = theta / THETA_MAX
    if theta > THETA_MAX and Motion.is_point_in_front_of_robot(self.robotLocation, self.boxLocation):
        v = Vperp
    else:
        v = alpha * Vperp + (1 - alpha) * Vr_to_t
    return v

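# A small worked example of the blending used in dock() above. The only assumptions here
# are the concrete numbers: THETA_MAX is a module-level constant in the original code, and
# 1.0 rad is an arbitrary value chosen for illustration. For small theta the command is
# mostly the direct vector Vr_to_t; as theta approaches THETA_MAX it is mostly Vperp.
import numpy as np

THETA_MAX = 1.0                    # assumed value for illustration only
Vr_to_t = np.array([1.0, 0.0])     # direct robot-to-target direction
Vperp = np.array([0.0, -1.0])      # perpendicular ("turn right") direction
for theta in (0.2, 0.5, 0.9):
    alpha = theta / THETA_MAX
    v = alpha * Vperp + (1 - alpha) * Vr_to_t
    print(theta, v)                # blend shifts from Vr_to_t toward Vperp as theta grows
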
def run(delay, sensor_type, pin, webhook, source, metric_prefix, output, format):
    sensor = None
    is_polling = False
    if sensor_type.startswith('DHT'):
        pin = int(pin)
        sensor = Temp(source, metric_prefix, output, sensor_type, pin, format)
        is_polling = True
    elif sensor_type == 'HC-SRO':
        pins = pin.split(',')
        sensor = Distance(source, metric_prefix, output, sensor_type, int(pins[0]), int(pins[1]), format)
        is_polling = True
    elif sensor_type == 'SSM-1':
        pin = int(pin)
        sensor = Generic(source, metric_prefix, output, sensor_type, pin, 'Sound')
    elif sensor_type == 'HC-SR501':
        pin = int(pin)
        sensor = Motion(source, metric_prefix, output, sensor_type, pin)
    elif sensor_type == 'SEO53':
        pin = int(pin)
        sensor = Generic(source, metric_prefix, output, sensor_type, pin, 'Vibration')
    elif sensor_type == 'SEO23':
        pin = int(pin)
        sensor = Generic(source, metric_prefix, output, sensor_type, pin, 'Tilt')
    elif sensor_type == 'YL-69':
        pin = int(pin)
        sensor = Moisture(source, metric_prefix, output, sensor_type, pin, 'Moisture', delay)
    else:
        sensor = Host(source, metric_prefix, output)
        is_polling = True
    while True:
        sensor.get_info()
        metrics = sensor.format_metrics()
        if webhook is not None:
            send_metrics(metrics, output, webhook, source)
        else:
            print(metrics)
        # check if the sensor is something that needs to poll for results based on a delay.
        if is_polling:
            time.sleep(delay)

class Autopilot:

    def __init__(self):
        self.vision = Vision(self._on_new_capture)
        self.motion = Motion()

    def start(self):
        self.motion.set_values(0, 0)
        self.motion.enable()
        self.vision.start()

    def stop(self):
        self.motion.disable()
        self.vision.stop()

    @property
    def is_running(self):
        return self.vision.is_observing

    def _on_new_capture(self, objects):
        # Speed equals the probability of the most probable object detected.
        # That is, if confident - move fast
        speed = objects[0].probability

        total_weight = 0
        direction_weight = 0
        for i in range(0, len(objects)):
            obj = objects[i]
            # box size times probability
            weight = (obj.box[2] - obj.box[0]) * (obj.box[3] - obj.box[1]) * obj.probability
            # if it's not a duck - discount heavily
            if obj.category != 1:
                weight /= 10
            total_weight += weight
            # box_mid_point = (obj.box[3] + obj.box[1]) / 2
            # translate [0,1] into [-1,1]
            # direction = box_mid_point * 2 - 1
            direction = (obj.box[3] + obj.box[1]) - 1
            direction_weight += direction * weight

        direction = direction_weight / total_weight
        self.motion.set_values(speed, direction)

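# A short standalone check (made-up box coordinates only) of the steering math in
# _on_new_capture above: with normalized box edges box[1] and box[3], the midpoint is
# (box[1] + box[3]) / 2, and mapping [0, 1] onto [-1, 1] gives mid * 2 - 1, which
# simplifies to (box[3] + box[1]) - 1 as used in the loop.
for box_lo, box_hi in [(0.4, 0.6), (0.0, 0.2), (0.8, 1.0)]:
    mid = (box_lo + box_hi) / 2
    direction = mid * 2 - 1
    assert abs(direction - ((box_hi + box_lo) - 1)) < 1e-12
    print(mid, direction)  # centered box -> 0.0, one side -> negative, other side -> positive
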
def run_server():
    global bot
    global cam
    global motion
    try:
        app.bot_config = Config.read()
        bot = CoderBot.get_instance(servo=(app.bot_config.get("move_motor_mode") == "servo"))
        cam = Camera.get_instance()
        motion = Motion.get_instance()
    except ValueError as e:
        app.bot_config = {}
        logging.error(e)
    if app.bot_config.get('load_at_start') and len(app.bot_config.get('load_at_start')):
        app.prog = app.prog_engine.load(app.bot_config.get('load_at_start'))
    bot.set_callback(PIN_PUSHBUTTON, button_pushed, 100)
    bot.say(app.bot_config.get("sound_start"))
    app.run(host="0.0.0.0", port=8080, debug=True, use_reloader=False, threaded=True)

def ManageMotion():
    motion = Motion()
    while motion.IsActive():
        ManageCommands(motion)
        # Manage motion and gestures
        motion.GetInformationOnNextFrame()
        if motion.TimeElapsedSinceLastMotion() > config['timeToWaitWhenNoMovementBeforeSleep']:
            time.sleep(config['timeToSleepWhenNoMovement'])
        gesture = motion.GetGesture()
        threading.Thread(target=SendGesture, args=(gesture,)).start()
    motion.Dispose()

def _safetyStop(self, func, *args, **kwargs):
    """
    The generic safety wrapper for the Turtlebot.

    Args:
        func (function): A Turtlebot behavioral function.
        *args: The arguments to func.
        **kwargs: Keyword arguments to func.
    """
    # if we see a cliff or get picked up, stop now
    if self.sensors.cliff or self.sensors.wheeldrop:
        Motion.stop(self, now=True)
    # if we hit something, stop now
    elif self.sensors.bump:
        Motion.stopLinear(self, now=True)
    # if we see something coming, stop gently
    elif self.sensors.obstacle:
        Motion.stopLinear(self)
    # otherwise, stay the course
    else:
        func(self, *args, **kwargs)

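# A minimal, self-contained sketch (hypothetical names throughout, not taken from the
# SafeMotion code above) of the wrapper pattern that _safetyStop implements: the modifier
# selected in __init__ receives the behavioral function plus its arguments and only
# forwards the call when no sensor flags are raised.
class DemoSensors:
    cliff = wheeldrop = bump = obstacle = False


class DemoSafeMotion:
    def __init__(self):
        self.sensors = DemoSensors()
        self._motionModifier = self._safety_stop  # as in SafeMotion.__init__ with safety_level == 0

    def _safety_stop(self, func, *args, **kwargs):
        if self.sensors.cliff or self.sensors.wheeldrop or self.sensors.bump or self.sensors.obstacle:
            print("stopping instead of", func.__name__)
        else:
            func(self, *args, **kwargs)

    def forward(self, speed):
        print("driving forward at", speed)

    def walk(self, speed):
        # callers go through the modifier so every motion command is safety-checked
        self._motionModifier(DemoSafeMotion.forward, speed)


robot = DemoSafeMotion()
robot.walk(0.3)               # prints "driving forward at 0.3"
robot.sensors.bump = True
robot.walk(0.3)               # prints "stopping instead of forward"
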
from flask import Flask
from temperature import TemperatureSensor
from motion import Motion
import RPi.GPIO as GPIO
import time

GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)

temperature = TemperatureSensor(14)
motion = Motion(17)
motion.run()

app = Flask(__name__)


@app.route("/metrics")
def metrics():
    temperature_data = ""
    result = temperature.read()
    if result.is_valid():
        temperature_data = f"""pihome_temperature {result.temperature}
pihome_humidity {result.humidity}"""
    return f"""{temperature_data}
pihome_movement {motion.read()}""", 200, {'Content-Type': 'text/plain; charset=utf-8'}


if __name__ == "__main__":
    app.run(host="0.0.0.0")  # assumption: the original snippet ends at the if statement; starting the dev server is the usual body

import logging

from motion import Motion

log = logging.getLogger(__name__)

tasks = Motion(stream_name='borgstrom-test')


@tasks.respond_to('simple')
def simple(payload):
    log.info("Simple task: %s", payload)

def handle_bot():
    """ Execute a bot command """
    bot = CoderBot.get_instance()
    cam = Camera.get_instance()
    motion = Motion.get_instance()
    audio = Audio.get_instance()
    cmd = request.args.get('cmd')
    param1 = request.args.get('param1')
    param2 = request.args.get('param2')
    print('/bot', json.dumps(request.args))
    if cmd == "move":
        bot.move(speed=int(param1), elapse=float(param2))
    elif cmd == "turn":
        bot.turn(speed=int(param1), elapse=float(param2))
    elif cmd == "move_motion":
        motion.move(dist=float(param2))
    elif cmd == "turn_motion":
        motion.turn(angle=float(param2))
    elif cmd == "stop":
        bot.stop()
        try:
            motion.stop()
        except Exception:
            logging.warning("Camera not present")
    elif cmd == "take_photo":
        try:
            cam.photo_take()
            audio.say(app.bot_config.get("sound_shutter"))
        except Exception:
            logging.warning("Camera not present")
    elif cmd == "video_rec":
        try:
            cam.video_rec()
            audio.say(app.bot_config.get("sound_shutter"))
        except Exception:
            logging.warning("Camera not present")
    elif cmd == "video_stop":
        try:
            cam.video_stop()
            audio.say(app.bot_config.get("sound_shutter"))
        except Exception:
            logging.warning("Camera not present")
    elif cmd == "say":
        logging.info("say: " + str(param1) + " in: " + str(get_locale()))
        audio.say(param1, get_locale())
    elif cmd == "halt":
        logging.info("shutting down")
        audio.say(app.bot_config.get("sound_stop"))
        bot.halt()
    elif cmd == "restart":
        logging.info("restarting bot")
        bot.restart()
    elif cmd == "reboot":
        logging.info("rebooting")
        bot.reboot()
    return "ok"

class Mirai:
    ip = port = _motion = _speech = _tablet = _face_detection = _wave_detection = _audio_player = _speech_recognition = _system = session = None

    def load_modules(self):
        self._speech = Speech(self.session)
        self._motion = Motion(self.session)
        self._tablet = Tablet(self.session)
        self._face_detection = FaceDetection(self.session, self)
        self._wave_detection = WavingDetection(self.session)
        self._audio_player = AudioPlayer(self.session)
        self._speech_recognition = SpeechRecognition(self.session)
        self._system = System(self.session)

    def __init__(self, ip, port=9559):
        self.ip = ip
        self.port = port

    def start(self):
        self.connect()
        self.load_modules()
        self.motion().stand()

    def connect(self):
        self.session = qi.Session()
        try:
            self.session.connect("tcp://" + self.ip + ":" + str(self.port))
            self.load_modules()
        except RuntimeError:
            print("Can't connect to Naoqi at ip \"" + self.ip + "\" on port " + str(self.port))
            self.checkConnection()

    def checkConnection(self):
        connected = self.session.isConnected()
        print("Connection status: " + str(connected))
        if not connected:
            time.sleep(.1)
            print("Trying to reconnect...")
            self.connect()

    def speech(self):
        self.checkConnection()
        return self._speech

    def motion(self):
        self.checkConnection()
        return self._motion

    def tablet(self):
        self.checkConnection()
        return self._tablet

    def face_detection(self):
        self.checkConnection()
        return self._face_detection

    def wave_detection(self):
        self.checkConnection()
        return self._wave_detection

    def audio_player(self):
        self.checkConnection()
        return self._audio_player

    def speech_recognition(self):
        self.checkConnection()
        return self._speech_recognition

    def system(self):
        self.checkConnection()
        return self._system

    def sleep(self):
        return self._motion.sleep()

    def wake_up(self):
        return self._motion.wake_up()

def visualize(frame):
    """ Visualizes a frame. """
    cv2.imshow('frame', frame)


def shouldExit():
    # TODO figure this out
    return cv2.waitKey(1) & 0xFF == ord('q')


def cleanup(camera):
    """ Prepare for shutting down. """
    camera.release()
    cv2.destroyAllWindows()


camera = cv2.VideoCapture(0)
motion = Motion(snapFrame(camera))

while camera.isOpened():
    frame = snapFrame(camera)
    if motion.detectedMotion(frame):
        motion.paintDebug(frame)
    visualize(frame)
    if shouldExit():
        break

cleanup(camera)

def ManageMotion():
    motion = Motion()

    # Params to change on the fly
    cv2.namedWindow('paramMinMaxPalm')
    cv2.createTrackbar('MAX H', 'paramMinMaxPalm', 1, 255, nothing)
    cv2.createTrackbar('MAX S', 'paramMinMaxPalm', 1, 255, nothing)
    cv2.createTrackbar('MAX V', 'paramMinMaxPalm', 1, 255, nothing)
    cv2.createTrackbar('MIN H', 'paramMinMaxPalm', 1, 255, nothing)
    cv2.createTrackbar('MIN S', 'paramMinMaxPalm', 1, 255, nothing)
    cv2.createTrackbar('MIN V', 'paramMinMaxPalm', 1, 255, nothing)
    cv2.setTrackbarPos('MAX H', 'paramMinMaxPalm', config['hand']['hsv_palm_max'][0])
    cv2.setTrackbarPos('MAX S', 'paramMinMaxPalm', config['hand']['hsv_palm_max'][1])
    cv2.setTrackbarPos('MAX V', 'paramMinMaxPalm', config['hand']['hsv_palm_max'][2])
    cv2.setTrackbarPos('MIN H', 'paramMinMaxPalm', config['hand']['hsv_palm_min'][0])
    cv2.setTrackbarPos('MIN S', 'paramMinMaxPalm', config['hand']['hsv_palm_min'][1])
    cv2.setTrackbarPos('MIN V', 'paramMinMaxPalm', config['hand']['hsv_palm_min'][2])

    cv2.namedWindow('paramSearchRangeHand')
    cv2.createTrackbar('INC H', 'paramSearchRangeHand', 1, 255, nothing)
    cv2.createTrackbar('INC S', 'paramSearchRangeHand', 1, 255, nothing)
    cv2.createTrackbar('INC V', 'paramSearchRangeHand', 1, 255, nothing)
    cv2.createTrackbar('DEC H', 'paramSearchRangeHand', 1, 255, nothing)
    cv2.createTrackbar('DEC S', 'paramSearchRangeHand', 1, 255, nothing)
    cv2.createTrackbar('DEC V', 'paramSearchRangeHand', 1, 255, nothing)
    cv2.setTrackbarPos('INC H', 'paramSearchRangeHand', config['hand']['hsv_hand_inc'][0])
    cv2.setTrackbarPos('INC S', 'paramSearchRangeHand', config['hand']['hsv_hand_inc'][1])
    cv2.setTrackbarPos('INC V', 'paramSearchRangeHand', config['hand']['hsv_hand_inc'][2])
    cv2.setTrackbarPos('DEC H', 'paramSearchRangeHand', config['hand']['hsv_hand_dec'][0])
    cv2.setTrackbarPos('DEC S', 'paramSearchRangeHand', config['hand']['hsv_hand_dec'][1])
    cv2.setTrackbarPos('DEC V', 'paramSearchRangeHand', config['hand']['hsv_hand_dec'][2])

    frameIdx = 0
    currentSliding = "None"
    timeElapsedSinceLastSlide = time.time()

    if not motion.IsActive():
        print("No camera found")

    # Debug Palm Tracking (See palm color detection in real time - consuming)
    motion.debugPalm = False

    while motion.IsActive():
        # Refresh OpenCV Windows
        cv2.waitKey(1)
        main.ManageCommands(motion)

        # Refresh config from param
        config['hand']['hsv_palm_max'] = [cv2.getTrackbarPos('MAX H', 'paramMinMaxPalm'),
                                          cv2.getTrackbarPos('MAX S', 'paramMinMaxPalm'),
                                          cv2.getTrackbarPos('MAX V', 'paramMinMaxPalm')]
        config['hand']['hsv_palm_min'] = [cv2.getTrackbarPos('MIN H', 'paramMinMaxPalm'),
                                          cv2.getTrackbarPos('MIN S', 'paramMinMaxPalm'),
                                          cv2.getTrackbarPos('MIN V', 'paramMinMaxPalm')]
        config['hand']['hsv_hand_inc'] = [cv2.getTrackbarPos('INC H', 'paramSearchRangeHand'),
                                          cv2.getTrackbarPos('INC S', 'paramSearchRangeHand'),
                                          cv2.getTrackbarPos('INC V', 'paramSearchRangeHand')]
        config['hand']['hsv_hand_dec'] = [cv2.getTrackbarPos('DEC H', 'paramSearchRangeHand'),
                                          cv2.getTrackbarPos('DEC S', 'paramSearchRangeHand'),
                                          cv2.getTrackbarPos('DEC V', 'paramSearchRangeHand')]

        # Manage motion and gestures
        motion.GetInformationOnNextFrame()

        # Infos movement
        try:
            cv2.putText(motion.frameDifference,
                        "Elapsed: " + str(motion.TimeElapsedSinceLastMotion()) + "/" + str(config['timeToWaitWhenNoMovementBeforeSleep']),
                        (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            cv2.putText(motion.frameDifference,
                        "Movement: " + str(motion.movementRatio) + "/" + str(config['frameDifferenceRatioForMovement']),
                        (5, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            cv2.imshow('Movement detected', motion.frameDifference)
        except:
            pass

        if motion.TimeElapsedSinceLastMotion() > config['timeToWaitWhenNoMovementBeforeSleep']:
            cv2.putText(motion.currentFrame, "SLEEP MODE NO MOVEMENT", (5, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            cv2.imshow('Current Frame', motion.currentFrame)
            time.sleep(config['timeToSleepWhenNoMovement'])

        gesture = motion.GetGesture()
        if gesture.properties['palm']:
            print("PALM")
        threading.Thread(target=main.SendGesture, args=(gesture,)).start()

        # Gesture infos
        try:
            # print("Frame: " + str(frameIdx))
            frameIdx += 1
            # print(gesture.properties)
            if motion.handTracked is None:
                cv2.putText(motion.currentFrame, "Search Palm", (5, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, 200, 1)
                cv2.imshow('Current Frame', motion.currentFrame)
                cv2.imshow('Mask from HSV Range', motion.mask_rafined)
            cv2.putText(motion.currentFrame, "Width: " + str(gesture.recW), (5, 200), cv2.FONT_HERSHEY_SIMPLEX, 1, 200, 1)
            cv2.putText(motion.currentFrame, "Height: " + str(gesture.recH), (5, 250), cv2.FONT_HERSHEY_SIMPLEX, 1, 200, 1)
            cv2.putText(motion.currentFrame, "SRatio: " + str(gesture.recH / gesture.recW), (5, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, 200, 1)
            cv2.rectangle(motion.currentFrame, (gesture.recX, gesture.recY),
                          (gesture.recX + gesture.recW, gesture.recY + gesture.recH), (0, 255, 0), 2)
            cv2.putText(motion.currentFrame, "MSize: " + str(gesture.moments['m00']), (5, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, 200, 1)
            cv2.drawContours(motion.currentFrame, [gesture.handContour], 0, (0, 255, 0), 3)
            cv2.circle(motion.currentFrame, (int(gesture.centerX), int(gesture.centerY)), int(gesture.radius / 1.5), [255, 0, 255], 1)
            cv2.circle(motion.currentFrame, (int(gesture.centerX), int(gesture.centerY)), int(gesture.radius / 3.2), [255, 0, 255], 1)
            cv2.putText(motion.currentFrame, "A: " + str(gesture.properties['angle']), (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, 200)
            if gesture.properties['palm']:
                cv2.putText(motion.currentFrame, "PALM", (5, 400), cv2.FONT_HERSHEY_SIMPLEX, 2, 150, 3)
            elif gesture.properties['thumbsUp']:
                cv2.putText(motion.currentFrame, "THUMBS UP", (5, 400), cv2.FONT_HERSHEY_SIMPLEX, 2, 150, 3)
            elif gesture.properties['thumbsDown']:
                cv2.putText(motion.currentFrame, "THUMBS DOWN", (5, 400), cv2.FONT_HERSHEY_SIMPLEX, 2, 150, 3)
            if gesture.properties['slideUp'] or gesture.properties['slideDown'] or gesture.properties['slideRight'] or gesture.properties['slideLeft']:
                timeElapsedSinceLastSlide = time.time()
                currentSliding = "UP" if gesture.properties['slideUp'] else "DOWN" if gesture.properties['slideDown'] else "RIGHT" if gesture.properties['slideRight'] else "LEFT"
            if time.time() - timeElapsedSinceLastSlide < 1:
                cv2.putText(motion.currentFrame, "Sliding " + currentSliding, (5, 450), cv2.FONT_HERSHEY_SIMPLEX, 2, 150, 3)
            for defect in gesture.palmDefects:
                cv2.line(motion.currentFrame, defect[0], defect[1], [255, 0, 0], 2)
                cv2.circle(motion.currentFrame, defect[2], 6, [0, 0, 255], -1)
            cv2.imshow('Current Frame', motion.currentFrame)
        except:
            pass

        pressedKey = cv2.waitKey(33)
        if pressedKey == 27:  # Esc key to stop
            break

    motion.Dispose()
    os._exit(1)

def playMotionFile(self, filename):
    motion = Motion()
    motion.read(filename)
    self.playMotion(motion)