Example #1
    def _init_vision(self):
        udev = pyudev.Context()
        cams = list(
            udev.list_devices(
                subsystem="video4linux",
                # For now, find devices that use this driver
                ID_USB_DRIVER="uvcvideo"))

        if len(cams) == 0:
            print("Camera isn't connected.")
            return

        camdev = cams[0].device_node

        # Find libkoki.so:
        libpath = None
        if "LD_LIBRARY_PATH" in os.environ:
            for d in os.environ["LD_LIBRARY_PATH"].split(":"):
                l = glob.glob("%s/libkoki.so*" % os.path.abspath(d))

                if len(l):
                    libpath = os.path.abspath(d)
                    break

        if libpath is None:
            v = vision.Vision(camdev, "/usr/lib")
        else:
            v = vision.Vision(camdev, libpath)

        self.vision = v
Example #2
def main():
    r_instance.R = Robot()
    set_debug_mode(True)
    dump_battery()

    # Initialise modules
    task.pool = task.TaskPool()
    control.control = control.Control()
    vision.vision = vision.Vision()
    arduino_interface.arduino = arduino_interface.Arduino()
    game_map.game_map = game_map.Map()

    # Move initial token
    task.pool.execute(control.MoveTask(1.2))
    task.pool.execute(control.MoveTask(-0.6))
    tokens = vision.vision.capture()[0]
    our_marker_id = 0
    if len(tokens) > 0:
        our_marker_id = tokens[0].info.code
    log("Robot", "our_marker_id = {0}".format(our_marker_id))
    task.pool.execute(control.MoveTask(-0.7))

    # Look at remaining tokens and move into them
    task.pool.execute(control.RotateTask(-math.pi * 0.35))

    # Attempt to correct itself
    task.pool.execute(TokenCorrection(our_marker_id))
    task.pool.execute(control.MoveTask(0.775))
    for i in xrange(2):
        task.pool.execute(control.RotateTask(math.pi * 0.4, 3.0))
        task.pool.execute(control.MoveTask_Stepper(2.0, 3.0, 8))
    """
    # Move away, turn 180 degrees then calibrate
    task.pool.add( control.MoveTask( -1.5 ) )
    task.pool.add( control.RotateTask( -math.pi * 0.5 ) )
    task.pool.add( control.MoveTask( 2, 0.75 ) )
    
    # Now we know where we are, move to where our tokens reside and grab one
    target_point = game_map.game_map.starting_position
    target_point.x = 8 - target_point.x
    task.pool.add( FigureOutPosition() )
    task.pool.add( MoveToNextPoint( target_point ) )
    task.pool.add( SearchForToken() )
    task.pool.add( control.RotateTask( -math.pi * 0.5 ) )
    task.pool.add( control.MoveTask( 2 ) )
    task.pool.add( control.MoveTask( -1 ) )
    
    # Move to the middle square
    task.pool.add( FigureOutPosition() )
    task.pool.add( MoveToNextPoint( Position( 5, 5 ) ) )
    """

    # Enter the main loop
    task.pool.execute(task.ExitLoop())
    task.pool.run()

    # Final words..
    log("Robot", "Task queue finished executing, exiting...")
    dump_battery()
Example #3
    def robotInit(self):
        # when the robot code starts (only once, not each time it's enabled)
        # Declare Gamepads and CANTalons here.
        # Don't access encoder values here; do that in teleopInit or
        # autonomousInit.
        self.visionary = vision.Vision()
        self.xCenterDist = 0
Example #4
	def __init__(self, width_x, height_y):
		super().__init__()
		#---------------------canvas---------------------------------
		with self.canvas: #paint playing field
			Color(1, 1, 1) #white color
			(init_x, init_y) = (Window.size[0] / 2 - width_x * 10, Window.size[1] / 2 - height_y * 10)
			Rectangle(pos=(init_x, init_y), size=(width_x * 20, height_y * 20)) # adaptive playing field with a fixed cell size (20 px)
			Color(0, 0, 0) #black color
			for i in range(height_y):
				Line(points=[init_x, init_y + i*20, init_x + width_x * 20, init_y + i*20])
			for i in range(width_x):
				Line(points=[init_x + i*20, init_y, init_x + i*20, init_y + height_y * 20])
		#----------------------------------------------------------------
		self.width_x = width_x #width_x of playing field
		self.height_y = height_y #height_y of playing field
		self.sourceX = int(Window.size[0] / 2) - width_x * 10 # left edge of the playing field (x axis)
		self.sourceY = int(Window.size[1] / 2) - height_y * 10 # bottom edge of the playing field (y axis)
		self.worm = None #instance of the snake
		self.fruit = None #instance of the fruit
		self.count_start = -1 # number of game starts
		self.population = 250 #number of snakes in population
		self.vision_dir = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1)] #set of the vision directions
		self.vision = vision.Vision(self.vision_dir, self.sourceX, self.sourceY, self.width_x, self.height_y) #instance for vision
		self.dir = (1, 0) #start direction
		self.ff = neural.FeedForward(self.population) #instance for neural network
		t = self.ff.load_snakes() #load all snakes from file
		self.snakes = t[0]
		self.random_seed = t[1]
		self.snakes_offspring = None #instance for offspring snakes
		self.mutation_rank = 0.1 #rank of mutation
		self.cur_snake = None #instance for current playing snake
		self.matrices = None #instance of matrix for current snake
		self.count_moves = 0 #count of the snake moves
		self.count_fruit = 0 # count of fruit eaten by the snake
		self.fitness_source = [0 for i in range(self.population)] # fitness score of every snake in the source population
		self.fitness_offspring = [0 for i in range(self.population)] # fitness score of every snake in the offspring population
		self.count_try = 2 # number of attempts each population gets to play the game
		self.current_try = 1 # current try
		self.queue_population = False # which population is playing (False - source, True - offspring)
		self.generation = 0 #count generation
		self.sum_fruit = 0 
		#-----------------------------------------------------------------
		self.text1 = Label(pos=(self.sourceX + 70, self.sourceY - 100), text='Count start:') 
		self.text2 = Label(pos=(self.sourceX + 240, self.sourceY - 100), text='0')
		self.add_widget(self.text1)
		self.add_widget(self.text2) #labels for display count start
		self.text3 = Label(pos=(self.sourceX + 70, self.sourceY + self.height_y*20 + 30), text="Generation:")
		self.text4 = Label(pos=(self.sourceX + 240, self.sourceY + self.height_y*20 + 30), text="0")
		self.add_widget(self.text3)
		self.add_widget(self.text4) #labels for display generation
		
		self.text5 = Label(pos=(self.sourceX + 70, self.sourceY - 250), text='0')
		self.add_widget(self.text5)
		self.text6 = Label(pos=(self.sourceX + 70, self.sourceY - 175), text='0')
		self.add_widget(self.text6)
		self.text7 = Label(pos=(self.sourceX + 240, self.sourceY - 250), text='0')
		self.add_widget(self.text7)
Example #5
    def __init__(self, cv2):
        self.db = db.Database()
        self.stm = short_term_memory.StmUnit(self.db)
        self.vision = vision.Vision(cv2, self.db, self.stm)
        self.upei = upei.Upei(cv2)
        self.cv2 = cv2
        self.debug = True
        self.show_upei = True
        self.framerate = 2  # number of frames per second
        self.prev_users_in_room = {}
        self.users_in_room = {}
Example #6
    def autonomousInit(self):
        self.vision = vision.Vision()
        self.turnAlignLog = LogState("Turn align")
        self.targetWidthLog = LogState("Target width")

        self.currentSpeed = 0

        if dashboard.getSwitch("Drive voltage mode", False):
            self.holoDrive.setDriveMode(DriveInterface.DriveMode.VOLTAGE)
        else:
            self.holoDrive.setDriveMode(DriveInterface.DriveMode.POSITION)
Example #7
    def _init_vision(self, camdev="/dev/video0"):
        if not os.path.exists(camdev):
            print("Camera isn't connected.")
            return

        # Find libkoki.so:
        libpath = None
        if "LD_LIBRARY_PATH" in os.environ:
            for d in os.environ["LD_LIBRARY_PATH"].split(":"):
                l = glob.glob("%s/libkoki.so*" % os.path.abspath(d))

                if len(l):
                    libpath = os.path.abspath(d)
                    break

        if libpath is None:
            v = vision.Vision(camdev)
        else:
            v = vision.Vision(camdev, libpath)

        self.vision = v
Example #8
    def __init__(self, ip="127.0.0.1", port=9559):
        connection_url = "tcp://" + ip + ":" + str(port)
        print("Connecting to nao-qi at {0} ...".format(connection_url))
        self.app = qi.Application(["--qi-url=" + connection_url])
        self.app.start()
        self.session = self.app.session

        self.audio = audio.Audio(self.session)
        self.vision = vision.Vision(self.session)
        self.touch = touch.Touch(self.session)
        self.motion = motion.Motion(self.session)
        self.behavior = behavior.Behavior(self.session)
        self.tablet = tablet.Tablet(self.session)

        self.audio.set_callback(self.send)  # Set send callback
        self.vision.set_callback(self.send)  # Set send callback
        self.touch.set_callback(self.send)  # Set send callback
        self.motion.set_callback(self.send)  # Set send callback
        self.behavior.set_callback(self.send)  # Set send callback
Example #9
    def create_vision(self, args):
        if args.img_source == 'rt':
            img_provider = image_provider.RCVisionProvider(args.location)
        elif args.img_source == 'img':
            img_provider = image_provider.StorageVisionProvider(
                os.path.join(args.location))
        elif args.img_source == 'dir':
            img_provider = image_provider.DirectoryVisionProvider(
                os.path.join(args.location))
        else:
            # Fail fast on an unrecognised source instead of a later NameError
            raise ValueError("Unknown img_source: %s" % args.img_source)

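        # Build the Vision pipeline from the chosen image provider and the
        # project's frozen SSD-MobileNet detection graph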
        self.vision = vision.Vision(
            img_provider,
            os.path.join(self._project_path,
                         'training-coding/models/roboheads-ssd_mobilenet_v1',
                         'frozen_inference_graph.pb'))
        self.vision.updated.connect(self.on_vision_updated)

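        # Run the vision loop on a background thread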
        self.vision_thread = threading.Thread(target=self.vision.run)
        self.vision_thread.start()
Example #10
def main():
    try:
        arm = Arm('/dev/ttyACM0')
    except Exception as e:
        arm = Arm('/dev/ttyACM1')

    if len(sys.argv) > 1 and sys.argv[1] == 'shell':
        interactive_shell(arm)

    if len(sys.argv) > 1 and sys.argv[1] == 'height':
        import collections
        import vision

        v = vision.Vision()
        q = collections.deque(maxlen=4)

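        # Continuously print the pen height, smoothed over the last four readings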
        while True:
            h = v.get_pen_height()
            q.append(h if h else 0)
            print(sum(q) / 4)
Example #11
def interactive_shell(arm):
    import numpy as np
    import vision
    v = None

    while True:
        command = input('arm shell >> ')
        name, *args = command.split(' ')

        try:
            if name == "":
                print('Please enter a command')
            elif name == "move":
                if len(args) == 2:
                    target = np.array([float(args[0]), float(args[1])])
                    arm.move_to(target, speed=3, auto_height=True)
                elif len(args) == 3:
                    target = np.array(
                        [float(args[0]),
                         float(args[1]),
                         float(args[2])])
                    arm.move_to(target, speed=3, auto_height=False)

            elif name == "up":
                arm.up()

            elif name == "down":
                arm.down()
            elif name == "height":
                if not v:
                    v = vision.Vision()
                avg_amount = 10
                h_list = (v.get_pen_height() for _ in range(avg_amount))
                h = sum(h_list) / avg_amount
                print('Height:', round(h, 2), 'mm')

        except Exception as e:
            print("Error while processing command:", e)
Example #12
import motor_interface
import vision
import image_preperation

img = image_preperation.prepare_image("bbb_logo.jpeg")

app = vision.Vision(motor_interface.MotorInterface(0, 0),
                    100,
                    5,
                    150,
                    spray_point_offset=(0, -25),
                    image_to_print=img)

app.run()
Example #13
import vision
import camera
import cv2

vis = vision.Vision()
cam = camera.Camera()

while True:
    img = cam.getFrame()
    cv2.imshow('img', img)
    cv2.waitKey(1)
    # lines are green
    lines = vis.detect_lines(img)

# car is blue
#car_pos = vis.detect_car(img)

# other cars are red
#other_cars = vis.detect_other_cars(img)

# and so are you
Example #14
def main():
    # Setup all modules
    vision = v.Vision()
    speech = s.Speech()
    motor = m.Motor()
    motion_detector = md.Motion_Detector()
Example #15
    def setup(self):
        self.vision = vision.Vision()
Example #16
import io
import socket

from flask import Flask
from flask_cors import CORS

# FILE IMPORTS

import vision

##### FLASK SERVER

app = Flask("API's")
cors = CORS(app)
app.config["CORS_HEADERS"] = "Content-Type"

######## CONFIGS FOR PROGRAMS #########
####### SETUP

Vision = vision.Vision()
GratingPhases = vision.GratingPhases()

# headers for sending
headers = {"Content-Type": "Application/json"}

###########################


#### FOR FLASK FUNCTIONS ####
def getIp():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(("10.255.255.255", 1))
        IP = s.getsockname()[0]
    except:
Example #17
    difficulty_input = ""
    easy_inputs = {"e", "easy"}
    medium_inputs = {"m", "med", "medium"}
    hard_inputs = {"h", "hard"}
    while difficulty_input not in easy_inputs.union(medium_inputs).union(
            hard_inputs):
        difficulty_input = input(
            "Select difficulty level: EASY (E), MEDIUM (M), HARD (H)").lower()
        if difficulty_input in easy_inputs:
            difficulty = Difficulty.EASY
        elif difficulty_input in medium_inputs:
            difficulty = Difficulty.MEDIUM
        elif difficulty_input in hard_inputs:
            difficulty = Difficulty.HARD

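    # Both agents share a single Vision instance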
    v = vision.Vision()
    td_agent = TDAgent(WHITE, model, v, difficulty)
    human_agent = HumanAgent(BLACK, v)
    agents_list = [td_agent, human_agent]
    game = Game(agents_list)

    set_start_state = False
    if set_start_state:
        start_points = [
            [3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [0, 0, 3, 3, 2, 2, 0, 3, 0, 0, 0, 0,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
Example #18
import vision as v
import fortnite_api as fa
import output_screen as os
import weapons_screen as ws
import letters_classifier as lc
import time
import configparser


def log(text):
    print('[%s] %s' % (time.strftime('%H:%M:%S'), text))


config = configparser.ConfigParser()
config.read('config.ini')
vision = v.Vision()
classifier = lc.LettersClassifier()
gamescreen = gs.GameScreen(vision, classifier)
api = fa.FortniteApi(config)
# screen = os.OutputScreen(enabled = False)
weapons_screen = ws.WeaponsScreen(enabled=False)

kdas = {}

frame_number = 1
max_frame = 266
vision.frame_number = frame_number

while True:
    start = time.perf_counter()
Example #19
    def __init__(self):
        super(SearchStateMachine, self).__init__()
        self.__vision = vision.Vision()
        self.__laser = laser.Laser()
        self.__objectLoc = []
        self.__start = time.time()