def __init__(self, camara, mostrar_pantalla, testing_cam):
    """Store the camera configuration and build the vision helper.

    :param camara: camera handle used for capture.
    :param mostrar_pantalla: whether processed frames are shown on screen.
    :param testing_cam: flag enabling camera-testing mode.
    """
    # Keep the constructor arguments on the instance.
    self.camara = camara
    self.mostrar_pantalla = mostrar_pantalla
    self.testing_cam = testing_cam
    # Detection-area thresholds: hotspots and weeds respectively.
    hotspot_size, weed_size = 1000, 1500
    self.vis = vision.vision(hotspot_size, weed_size, self.mostrar_pantalla)
def move(self, target=None):
    """Move this sprite for one tick: flee a visible predator first,
    otherwise chase visible prey, otherwise wander randomly.

    :param target: unused here; kept for signature compatibility with
        other ``move`` overrides.
    """
    # Tiles within a vision radius of 4 around the current tile.
    visible_tiles = vision.vision(4, self.world_map, self.tile)
    # A predator tile to run from (takes priority)...
    flight_tile = vision.find_target(visible_tiles, self.predators)
    # ...and a prey tile to hunt.
    target_tile = vision.find_target(visible_tiles, self.prey)
    if flight_tile:
        # Fleeing: step away from the predator.
        move_to_tile = vision.flee(self.tile, flight_tile, self.world_map)
        if Sprite.is_movable_terrain(
                self, move_to_tile) and self.not_contains_sprite(move_to_tile):
            Sprite.move(self, move_to_tile)
        else:
            # Escape tile blocked or occupied; fall back to a random step.
            Sprite.move(self)
    elif target_tile:
        # Hunting: step towards the prey.
        move_to_tile = vision.approach(self.tile, target_tile, self.world_map)
        if Sprite.is_movable_terrain(
                self, move_to_tile) and Sprite.not_contains_sprite(
                self, move_to_tile, self.prey):
            if move_to_tile == target_tile:
                # Reached the prey: kill it before occupying its tile.
                move_to_tile.contains_sprite.die()
            Sprite.move(self, move_to_tile)
        else:
            Sprite.move(self)
    else:
        # Nothing of interest in sight; wander.
        Sprite.move(self)
def move(self):
    """Step towards the nearest visible prey tile, or wander randomly.

    Prey within a vision radius of 4 is approached one tile per tick; if
    no prey is visible, or the approach tile is blocked or occupied, the
    sprite falls back to a random move.
    """
    in_sight = vision.vision(4, self.world_map, self.tile)
    prey_tile = vision.find_target(in_sight, self.prey)
    if not prey_tile:
        # No prey in range — wander.
        AnimalSprite.move(self)
        return
    step = vision.approach(self.tile, prey_tile, self.world_map)
    walkable = (self.is_movable_terrain(step)
                and self.not_contains_sprite(step, self.prey))
    if walkable:
        AnimalSprite.move(self, step)
    else:
        AnimalSprite.move(self)
def move(self, target=None):
    """@Override  Fly towards the closest un-pollinated flower in sight.

    Looks up to 15 tiles away, filters out already-pollinated tiles, and
    pollinates the flower once the bee lands on its tile.  Falls back to
    a random move when no flower is visible or the path is blocked.

    :param target: unused; kept for signature compatibility.
    """
    in_sight = filter(BeesSprite.pollinated_filter,
                      vision.vision(15, self.world_map, self.tile))
    flower_tile = vision.find_target(in_sight, self.prey)
    if not flower_tile:
        Sprite.move(self)
        return
    step = vision.approach(self.tile, flower_tile, self.world_map)
    if not (Sprite.is_movable_terrain(self, step)
            and Sprite.not_contains_sprite(self, step, self.prey)):
        Sprite.move(self)
        return
    if step == flower_tile:
        # Landing on the flower itself — pollinate it.
        step.contains_sprite.pollinate()
    Sprite.move(self, step)
def move(self, target=None):
    """Herd movement: hunt visible prey first; otherwise non-leaders
    drift towards the herd leader; otherwise wander randomly.

    :param target: unused; kept for signature compatibility.
    :return:
    """
    # Prey search within a vision radius of 10.
    visible_tiles = vision.vision(10, self.world_map, self.tile)
    target_tile = vision.find_target(visible_tiles, self.prey)
    if target_tile:
        move_to_tile = vision.approach(self.tile, target_tile, self.world_map)
        if self.is_movable_terrain(
                move_to_tile) and self.not_contains_sprite(
                move_to_tile, self.prey):
            if move_to_tile == target_tile:
                # Stepping onto the prey itself: kill it first.
                move_to_tile.contains_sprite.die()
            AnimalSprite.move(self, move_to_tile)
        elif not self.is_leader:
            # Approach tile blocked — followers regroup on the leader.
            leader_tile = self.find_leader()
            if leader_tile:
                move_to_tile = vision.approach(self.tile, leader_tile, self.world_map)
                if self.is_movable_terrain(
                        move_to_tile) and self.not_contains_sprite(
                        move_to_tile, self.prey):
                    AnimalSprite.move(self, move_to_tile)
                else:
                    AnimalSprite.move(self)
            else:
                # NOTE(review): no leader found — random step; a blocked
                # leader (the elif above failing) leaves the sprite idle
                # this tick — confirm that is intended.
                AnimalSprite.move(self)
    elif not self.is_leader:
        # No prey in sight: followers head for the leader, with a
        # 1-in-11 chance of a random step to keep the herd loose.
        leader_tile = self.find_leader()
        if leader_tile:
            chance = random.randint(0, 10)
            if chance == 0:
                AnimalSprite.move(self)
            else:
                move_to_tile = vision.approach(self.tile, leader_tile, self.world_map)
                if self.is_movable_terrain(
                        move_to_tile) and self.not_contains_sprite(
                        move_to_tile, self.prey):
                    AnimalSprite.move(self, move_to_tile)
                else:
                    AnimalSprite.move(self)
        else:
            AnimalSprite.move(self)
def move(self, target=None):
    """@Override  Hunt the nearest visible prey, killing it on contact.

    Scans ``self.vision`` tiles around the sprite; when prey is found
    the sprite steps towards it (eating it on arrival), otherwise it
    takes a random step.

    :param target: Target tile to move sprite (unused in this override).
    """
    prey_tile = vision.find_target(
        vision.vision(self.vision, self.world_map, self.tile), self.prey)
    if not prey_tile:
        # Nothing edible in sight — random adjacent tile.
        AnimalSprite.move(self)
        return
    step = vision.approach(self.tile, prey_tile, self.world_map)
    if self.is_movable_terrain(step) and self.not_contains_sprite(step, self.prey):
        if step == prey_tile:
            # About to occupy the prey's tile — kill it first.
            step.contains_sprite.die()
        AnimalSprite.move(self, step)
    else:
        AnimalSprite.move(self)
def applyComputerVision(frameCount):
    """Continuously grab frames from the video stream and publish them.

    Reads frames from the global video stream ``vs``, resizes them to a
    width of 400 px, rotates them 180 degrees (camera is mounted upside
    down — presumably; confirm against the rig), and stores a copy in
    the global ``outputFrame`` under ``lock`` so a streaming endpoint
    can serve the latest frame.  Runs forever; intended for a daemon
    thread.

    :param frameCount: unused here; kept for caller compatibility.
    """
    # Grab global references to the video stream, output frame, and
    # lock variables shared with the streaming endpoint.
    global vs, outputFrame, lock

    # NOTE(review): this vision object is never used in the loop; kept
    # in case its constructor has required side effects — confirm.
    md = vision.vision()

    # Removed: unused counters (total, distances, currentTick) and
    # large blocks of commented-out drawing/timestamp code.
    while True:
        # Read the next frame from the video stream, resize it, and
        # correct the camera orientation.
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        frame = imutils.rotate(frame, 180)

        # Publish atomically so readers never see a partially-written frame.
        with lock:
            outputFrame = frame.copy()
def __init__(self, game):
    """Initialize the agent for the given game.

    :param game: game instance this object operates on.
    """
    # Helper objects for perception and rendering.
    self.vision = vision()
    self.visualizer = visualizer()
    # Start at the origin tile.
    self.current_tile = (0, 0)
    self.game = game
from vision import vision from picamera import PiCamera import jsonread import RPi.GPIO as GPIO # IEEE competition code address1 = 10 # Address to the drive nano address2 = 20 # Address to the arm nano bus = smbus.SMBus(1) camera = PiCamera() camera.resolution = (600, 600) camera.rotation = 180 myvision = vision() myNano = NanoManager() start_button = Button(button_pin) while True: print('begin') #check if file is in location #if true, proceed to round code (and button) #if not, run through jsonread while True: if file in os.listdir("/home/pi/locations/"): break else: # Get JSON file location
def main():
    """
    Main script: repeatedly photograph the maze with the Sawyer wrist
    camera, transform the vision waypoints into the robot's base frame
    via the AR marker, and draw the resulting path with the right arm.

    Bug fixes vs. the original:
    * The tf lookup handler tested the exception *classes*
      (``if tf2_ros.LookupException:``), which are always truthy, so it
      unconditionally printed "lookup"; it now inspects the caught
      exception instance.
    * ``print e`` (Python-2-only syntax) is now ``print(e)``.
    """
    while not rospy.is_shutdown():
        planner = PathPlanner("right_arm")
        limb = Limb("right")

        joint_angles = limb.joint_angles()
        print(joint_angles)

        # Joint configuration that points the wrist camera at the maze.
        camera_angle = {
            'right_j6': 1.72249609375,
            'right_j5': 0.31765625,
            'right_j4': -0.069416015625,
            'right_j3': 1.1111962890625,
            'right_j2': 0.0664150390625,
            'right_j1': -1.357775390625,
            'right_j0': -0.0880478515625
        }
        limb.set_joint_positions(camera_angle)
        limb.move_to_joint_positions(camera_angle,
                                     timeout=15.0,
                                     threshold=0.008726646,
                                     test=None)

        # Joint configuration from which the drawing motion starts.
        drawing_angles = {
            'right_j6': 1.9668515625,
            'right_j5': 1.07505859375,
            'right_j4': -0.2511611328125,
            'right_j3': 0.782650390625,
            'right_j2': 0.25496875,
            'right_j1': -0.3268349609375,
            'right_j0': 0.201146484375
        }

        raw_input("Press <Enter> to take picture: ")
        # Maze-solution waypoints in the vision (image) frame.
        waypoints_abstract = vision()
        print(waypoints_abstract)

        # The listener must stay referenced so the buffer keeps
        # receiving transforms in the background.
        tfBuffer = tf2_ros.Buffer()
        tfListener = tf2_ros.TransformListener(tfBuffer)
        r = rospy.Rate(10)

        # Poll until the base -> AR-marker transform becomes available.
        while not rospy.is_shutdown():
            try:
                trans = tfBuffer.lookup_transform('base', 'ar_marker_0',
                                                  rospy.Time()).transform
                break
            except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                    tf2_ros.ExtrapolationException) as err:
                # Report which tf failure occurred (see docstring bug fix).
                if isinstance(err, tf2_ros.LookupException):
                    print("lookup")
                elif isinstance(err, tf2_ros.ConnectivityException):
                    print("connect")
                elif isinstance(err, tf2_ros.ExtrapolationException):
                    print("extra")
                else:
                    print("not found")
            r.sleep()

        # Map every 2-D waypoint into the base frame via the AR marker.
        mat = as_transformation_matrix(trans)
        point_spaces = []
        for point in waypoints_abstract:
            point = np.array([point[0], point[1], 0, 1])
            point_space = np.dot(mat, point)
            point_spaces.append(point_space)
        print(point_spaces)

        raw_input("Press <Enter> to move the right arm to drawing position: ")
        limb.set_joint_positions(drawing_angles)
        limb.move_to_joint_positions(drawing_angles,
                                     timeout=15.0,
                                     threshold=0.008726646,
                                     test=None)

        # Add the table surface as a box obstacle so planning avoids it.
        size = [0.78, 1.0, 0.05]
        name = "box"
        pose = PoseStamped()
        pose.header.frame_id = "base"
        pose.pose.position.x = 0.77
        pose.pose.position.y = 0.0
        pose.pose.position.z = -0.18
        # Orientation as a quaternion (identity).
        pose.pose.orientation.x = 0.0
        pose.pose.orientation.y = 0.0
        pose.pose.orientation.z = 0.0
        pose.pose.orientation.w = 1.0
        planner.add_box_obstacle(size, name, pose)

        # Starting pose; quaternion (0, -1, 0, 0) points the tool down.
        x, y, z = 0.67, 0.31, -0.107343
        goal_1 = PoseStamped()
        goal_1.header.frame_id = "base"
        goal_1.pose.position.x = x
        goal_1.pose.position.y = y
        goal_1.pose.position.z = z
        goal_1.pose.orientation.x = 0.0
        goal_1.pose.orientation.y = -1.0
        goal_1.pose.orientation.z = 0.0
        goal_1.pose.orientation.w = 0.0

        # Plan and execute the straight-line drawing path; retry on failure.
        while not rospy.is_shutdown():
            try:
                waypoint = []
                for point in point_spaces:
                    goal_1.pose.position.x = point[0]
                    goal_1.pose.position.y = point[1]
                    # Set this z-value when using a different marker/pen.
                    goal_1.pose.position.z = -0.113343
                    waypoint.append(copy.deepcopy(goal_1.pose))
                plan, fraction = planner.plan_straight(waypoint, [])
                print(fraction)
                raw_input("Press <Enter> to move the right arm to draw: ")
                if not planner.execute_plan(plan):
                    raise Exception("Execution failed")
            except Exception as e:
                print(e)
                traceback.print_exc()
            else:
                break

        raw_input("Press <Enter> to start again: ")
import argparse import asyncio from IK import IK from servoPosition import servoPosition from constants import const, colors from utils import echo from simulator import simulator from audio import VoiceRecognition from sequencer import sequencer from vision import vision import speech_recognition as sr import time sim = simulator() vis = vision(sim) voice = VoiceRecognition() seq = sequencer(sim, vis) # Esta funcion es la que se encarga de recibir el codigo de la orden # y ejecutar la funcion o el workflow pertinente. def procesar_orden(orden, objeto): if orden == const.ORDEN_VEN: # ven seq.ven() echo(const.GAARI_SAYS + "Gaari viene", color=colors.OKGREEN) elif orden == const.ORDEN_ABRE: # abre seq.abre() echo(const.GAARI_SAYS + "Gaari está abriendo", color=colors.OKGREEN) # elif orden == const.ORDEN_AGARRA: # agarra # seq.agarra()