Example #1
import queue
import time
import cv2 as cv
import keyboard
import autoit
from vision import Vision
from threading import Thread

view_spore = Vision('spore')


def vision(main_queue):
    view_spore.view_screen(main_queue)


def program_start():
    global main_queue
    main_queue = queue.Queue()

    while True:
        vision(main_queue)
        if main_queue.qsize() > 0:
            print('Queue size: ' + str(main_queue.qsize()))
            element = main_queue.get()
            print(element['rectangles'])
            if element['type'] == 'monster':
                # click the center of the first detected rectangle (x, y, w, h)
                x, y, w, h = element['rectangles'][0]
                center_x = x + int(w / 2)
                center_y = y + int(h / 2)
                print('monster found at ' + str(center_x) + ',' + str(center_y))
                autoit.mouse_click("left", center_x, center_y, 1)
                time.sleep(4)
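
The example imports Thread but never starts one, so the vision step runs synchronously inside the main loop. As a hedged sketch (reusing the imports above, and assuming view_screen() keeps pushing detections onto the queue from its own loop), the producer could instead run on a worker thread:

def program_start_threaded():
    # Hypothetical variation, not the original code: vision runs in the
    # background and the main loop only consumes detections.
    main_queue = queue.Queue()
    worker = Thread(target=vision, args=(main_queue,), daemon=True)
    worker.start()

    while True:
        element = main_queue.get()  # blocks until a detection arrives
        if element['type'] == 'monster':
            x, y, w, h = element['rectangles'][0]
            autoit.mouse_click("left", x + w // 2, y + h // 2, 1)
            time.sleep(4)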
Example #2
    def click_modes_button(self):
        self.log('Clicking modes')
        return self.click_button('modes', offset=(50, 30))
    
    def click_battlegrounds_button(self):
        self.log('Clicking battlegrounds')
        return self.click_button('battlegrounds', offset=(50, 30))

    def run(self):
        while True:
            self.vision.refresh_frame()
            if self.can_see_menu('mainmenu'):
                self.log("We're in the main menu.")
                self.state = 'main menu'
                if self.can_see_button('modes'):
                    self.click_modes_button()
            elif self.can_see_menu('modes'):
                self.log("We're in the modes selection menu")
                self.state = 'modes'
                self.click_battlegrounds_button()
            else:
                self.log('Not doing anything')
            time.sleep(1)
    
    def log(self, text):
        print('[%s] %s' % (time.strftime('%H:%M:%S'), text))
        
v = Vision()
c = Controller()
g = Game(v, c)
g.run()
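
Example #2 depends on helpers that are not shown (can_see_menu, can_see_button, click_button). A minimal sketch of what two of them might look like on the Game class, assuming the Vision object exposes a template-matching lookup (find_template is an assumed name) and the Controller wraps mouse clicks:

    # Hypothetical helpers; find_template() and controller.click() are
    # assumed names, not confirmed by the snippet above.
    def can_see_button(self, name):
        return len(self.vision.find_template(name)) > 0

    def click_button(self, name, offset=(0, 0)):
        matches = self.vision.find_template(name)
        if len(matches) == 0:
            return False
        x, y = matches[0]
        self.controller.click(x + offset[0], y + offset[1])
        return True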
Example #3
from vision import Vision
from controller import Controller
from bot import Game
from window import Window

window = Window()
vision = Vision(window)
controller = Controller(window, vision)
game = Game(vision, controller, window)

game.run()
Example #4
#!/usr/bin/env python3

import config
from limiter import Limiter

from lrcat import LRCatalog
from vision import Vision

if __name__ == '__main__':
    catalog = LRCatalog(config.CATALOG, config.ALTERNATEROOT)
    vision = Vision(config.ENDPOINT, config.SUBSCRIPTION_KEY)
    limiter = Limiter(10, 60)
    for image in catalog.get_all_image():
        if not image.has_keyword("Visioned"):
            if image.file_format == "JPG":
                result = vision.parse_image(image.get_file_path(), True)
                limiter.process()
            elif image.file_format == "RAW":
                filename = catalog.get_converted_root_path(
                    image.root_path) + image.path + image.base_name + ".jpg"
                result = vision.parse_image(filename, True)
                limiter.process()
            else:
                print(f"Skip file:{image.get_file_path()}")
                result = None
            if result is not None:
                print(f'Processed: {image.id}')
                image.set_caption(result.description.captions[0].text)
                for tag in result.description.tags:
                    image.set_keyword(tag)
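
Limiter(10, 60) suggests the vision API calls are throttled to roughly 10 requests per 60 seconds. A self-contained sketch of such a limiter (the sliding-window behavior is an assumption; the real Limiter class is not shown):

import time
from collections import deque

class Limiter:
    """Hypothetical sliding-window rate limiter: at most max_calls
    process() calls per period seconds."""

    def __init__(self, max_calls, period):
        self.max_calls = max_calls
        self.period = period
        self.calls = deque()

    def process(self):
        now = time.monotonic()
        while self.calls and now - self.calls[0] > self.period:
            self.calls.popleft()  # drop timestamps outside the window
        if len(self.calls) >= self.max_calls:
            time.sleep(self.period - (now - self.calls[0]))
        self.calls.append(time.monotonic())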
Example #5
import cv2 as cv
import numpy as np
from time import time
from windowcapture import WindowCapture
from vision import Vision

# initialize the WindowCapture class

wincap = WindowCapture('2009scape')

cascade_iron = cv.CascadeClassifier("cascade/cascade.xml")
vision_iron = Vision(None)

loop_time = time()
while True:

    # get an updated image of the game
    screenshot = wincap.get_screenshot()

    rectangles = cascade_iron.detectMultiScale(screenshot)

    detection_image = vision_iron.draw_rectangles(screenshot, rectangles)
    cv.imshow("screenshot", detection_image)

    # debug the loop rate
    print('FPS {}'.format(1 / (time() - loop_time)))
    loop_time = time()

    # press 'q' with the output window focused to exit.
    # waits 1 ms every loop to process key presses
    key = cv.waitKey(1)
    if key == ord('q'):
        cv.destroyAllWindows()
        break
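
The draw_rectangles() helper used above belongs to the Vision class and is not shown. Assuming detectMultiScale-style (x, y, w, h) rectangles, a minimal version could be:

import cv2 as cv

def draw_rectangles(image, rectangles, color=(0, 255, 0)):
    # outline each detection on the image and return it
    for (x, y, w, h) in rectangles:
        cv.rectangle(image, (x, y), (x + w, y + h), color, 2)
    return image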
Example #6
    def saveImgFromCamera(self, n, cat, fname_start):
        # dp = DataPrep(max_data_n=100)
        categories = (
            'airplane',
            'backpack',
            'cactus',
            'dog',
            'ear',
            'face',
            'garden',
            'hamburger',
            'icecream',
            'jacket',
            'kangaroo',
            'ladder',
            'mailbox',
            'nail',
            'ocean',
            'paintbrush',
            'rabbit',
            'sailboat',
            'table',
            'underwear',
            'vase',
            'windmill',
            'yoga',
            'zebra',
        )
        print('.')
        v = Vision()
        print('.')
        print("Beginning saving doodles from image.")
        print("First: Drawing bounds for doodle.")
        #        boundFile = "bounds.txt"

        print("Now for the drawing part!")
        for a in range(n):
            for i in cat:
                # self.arm.move_back()
                # FormatConvert.drawFromFile(boundFile, self.arm, speed=3)
                # self.arm.move_away()
                print("\nPlease draw ", categories[i], "...")
                input()
                # grab a burst of frames so stale images are flushed from
                # the camera buffer and the final read is current
                for _ in range(11):
                    img = v._getImage(v.cam1)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                _, img = cv2.threshold(img, 150, 255, cv2.THRESH_BINARY_INV)
                img = np.rot90(img, 3)
                img = img[630:810, 260:440]
                name = fname_start + "_" + categories[i]
                name = name + "_" + str(a) + ".png"
                # cv2.imshow('frame', img)
                # cv2.waitKey(1)
                print("Saving name...")
                cv2.imwrite(name, img)
                print("Saved.")
                # input("next?")

        print("Done saving images.")
Example #7
import cv2
import numpy as np

from vision import Vision
from controller import Controller
from game import Game

vision = Vision()
controller = Controller()
game = Game(vision, controller)

# screenshot = vision.get_image('tests/screens/round-finished-results.png')
# print(screenshot)
# match = vision.find_template('bison-head', image=screenshot)
# print(np.shape(match)[1])

game.run()
Example #8
    def robotInit(self):
        self.vision = Vision()
Example #9
world = World()

error = False
if len(sys.argv) >= 3:
    if sys.argv[1] == 'true':
        debug_mode = True
    elif sys.argv[1] == 'false':
        debug_mode = False
    else:
        error = True
    try:
        image_width = int(sys.argv[2])
    except ValueError:
        error = True
if len(sys.argv) < 3 or error is True:
    debug_mode = False
    image_width = 600

vision = Vision(0, image_width, debug_mode, world)

decision_maker = DecisionMaker(world)

try:
    while True:
        vision.update()

        decision_maker.make_decision()
except BaseException:  # clean up on any interruption, including Ctrl+C
    vision.finish()
    decision_maker.finish()
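
The manual sys.argv handling above could be expressed with argparse; note that this sketch rejects a malformed width with an error message instead of silently falling back to 600 as the original does:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('debug', nargs='?', choices=['true', 'false'], default='false')
parser.add_argument('width', nargs='?', type=int, default=600)
args = parser.parse_args()

debug_mode = args.debug == 'true'
image_width = args.width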
Example #10
if __name__ == "__main__":
	bot = Bot()
	bot.start()
'''

#region Screen capture

os.chdir(os.path.dirname(os.path.abspath(__file__)))

# initialize the WindowCapture class
wincap = WindowCapture('Legends of Runeterra')
# initialize the Vision class
#vision_IP = Vision('img\minalegal.jpg')

fogolivre = [
    "img\\oomcrewrookie.jpg",
    "img\\crimsondisciple.jpg",
    "img\\decimate.jpg",
    "img\\getexcited.jpg",
    "img\\imperialdemoli.jpg",
    "img\\legiongran.jpg",
    "img\\legionrear.jpg",
    "img\\legionsaboteur.jpg",
    "img\\mysticshot.jpg",
    "img\\oxianfervor.jpg",
    "img\\preciouspet.jpg",
    "img\\statikkshock.jpg",
    "img\\sformation.jpg",
    "img\\usedcasksalesman.jpg",
]
vToFind = Vision(fogolivre[6])


loop_time = time()
while True:
    screenshot = wincap.get_screenshot()
    
    #cv.imshow('Computer Vision', screenshot)
    # This one works:
    # points = vision_IP.find(screenshot, 0.7, 'rectangles')
    points = vToFind.find(screenshot, 0.99, 'rectangles')
    
    #print('FPS {}'.format(1 / (time() - loop_time)))
    #print('FPS {}'.format(time() - loop_time))
    loop_time = time()
    # press 'q' with the output window focused to exit.
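
vToFind.find(screenshot, 0.99, 'rectangles') is not defined in this snippet. A hedged sketch of the usual cv.matchTemplate pattern behind this kind of find() (the internals here are an assumption, not the real class):

import cv2 as cv
import numpy as np

def find(needle_img, haystack_img, threshold=0.99):
    # score every position of the needle inside the haystack
    result = cv.matchTemplate(haystack_img, needle_img, cv.TM_CCOEFF_NORMED)
    locations = list(zip(*np.where(result >= threshold)[::-1]))  # (x, y)
    h, w = needle_img.shape[:2]
    rectangles = []
    for (x, y) in locations:
        rect = [int(x), int(y), w, h]
        rectangles.append(rect)
        rectangles.append(rect)  # duplicate so lone matches survive grouping
    rectangles, _ = cv.groupRectangles(rectangles, groupThreshold=1, eps=0.5)
    return rectangles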
Example #11
                print "Starting recording to file" + filename
                output_file.close()
                output_file = gzip.open(filename + ".gz", "w")
                #writer = cv.CreateVideoWriter(filename+".avi", fourcc, 16, res_input, 1)
            elif event.button == 2:
                output_file.close()
                output_file = open("/dev/null", "w")
                #writer = cv.CreateVideoWriter('/dev/null', fourcc, 16, res_input, 1)
            elif event.button == 8:
                robot.set_speed(0, 0)
                sys.exit(0)


pygame.init()

vision = Vision("Record")

joystick = pygame.joystick.Joystick(0)
joystick.init()

robot = SerialCar('/dev/ttyUSB0', 38400)
out_values = [0, 0]

output_file = open("/dev/null", "w")

try:
    while True:

        handle_events()

        robot.set_speed(out_values[0], out_values[1])
Example #12
# robotHeight = 0.37 #[m]
ROBOT_HEIGHT = 0.42

clock = time.clock()
sensor.reset()
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.set_auto_exposure(False)
sensor.set_auto_whitebal(False)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False, gain_db=0)
sensor.set_auto_whitebal(False, (-6.02073, -5.11, 1.002))
sensor.set_auto_exposure(False, 2000)

vision = Vision({})
loc = Localization(-0.7, -1.3, math.pi / 2, side)
strat = Strategy()
motion = Motion()
model = Model()
vision_postprocessing = Vision_postprocessing()
vision.load_detectors("vision/detectors_config.json")
with open("calibration/cam_col.json") as f:
    calib = json.load(f)

# setting model parameters
mass1 = [0, 0, 0, 0, 0, 0]
mass2 = [0, 0]
model.setParams(calib["cam_col"], ROBOT_HEIGHT, mass1, mass2)

#motion.move_head()
Example #13
    img = np.frombuffer(signedIntsArray, dtype='uint8')
    img.shape = (h, w, 4)

    # Free Resources
    dcObj.DeleteDC()
    cDC.DeleteDC()
    win32gui.ReleaseDC(hwnd, wDC)
    win32gui.DeleteObject(dataBitMap.GetHandle())

    img = img[...,:3]
    img = np.ascontiguousarray(img)

    return img


v1 = Vision('Zombie.png')
v2 = Vision('creeper.png')
v3 = Vision('steeve.png')

loopTime = time()
while True:

    screenshot = windowCapture()

    points = v1.findObjects(screenshot, 0.5, 'rectangles')
    points = v2.findObjects(screenshot, 0.5, 'rectangles')
    points = v3.findObjects(screenshot, 0.7, 'rectangles')


    print('Fps: {}'.format(1 / (time() - loopTime)))
    loopTime = time()
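
Example #13 shows only the tail of its windowCapture() function. The head it implies is the conventional win32gui/win32ui BitBlt capture; a sketch of that missing top half (the window handle and size handling are assumptions):

import win32gui
import win32ui
import win32con

def windowCapture(w=1920, h=1080):
    # capture the desktop into a compatible bitmap via BitBlt
    hwnd = win32gui.GetDesktopWindow()
    wDC = win32gui.GetWindowDC(hwnd)
    dcObj = win32ui.CreateDCFromHandle(wDC)
    cDC = dcObj.CreateCompatibleDC()
    dataBitMap = win32ui.CreateBitmap()
    dataBitMap.CreateCompatibleBitmap(dcObj, w, h)
    cDC.SelectObject(dataBitMap)
    cDC.BitBlt((0, 0), (w, h), dcObj, (0, 0), win32con.SRCCOPY)
    signedIntsArray = dataBitMap.GetBitmapBits(True)
    # ...continues with the reshape and cleanup shown in the tail above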
Example #14
    cv2.namedWindow("App", cv2.WINDOW_AUTOSIZE)
    cv2.setMouseCallback("App", mouseCallback)

    servo1 = Servo(1, None)  # green
    servo2 = Servo(2, None, 7)  # blue
    servo3 = Servo(3, None, 4)  # red

    servo1.setAngle(20)
    servo2.setAngle(20)
    servo3.setAngle(20)

    cap = cv2.VideoCapture(1)
    focusedVideoCapture = FocusedVideoCapture(cap)
    focusedVideoCapture.calibrateFocus()

    vision = Vision(focusedVideoCapture)

    vision.calibrateCamera()

    pid = PID(servo1, servo2, servo3, 45, 10)

    while True:
        vision.getBallPoint()
        vision.calculateError()
        vision.showMotorImage(vision.lastCapture)

        redError, greenError, blueError = vision.getErrors()

        pid.update(redError, greenError, blueError)

        # time.sleep(0.01)
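
The PID class driving the servos is not shown; PID(servo1, servo2, servo3, 45, 10) presumably runs one control loop per color channel. A generic single-axis sketch (the gains and signature are made up for illustration):

class SimplePID:
    def __init__(self, kp=0.5, ki=0.0, kd=0.1):
        self.kp, self.ki, self.kd = kp, ki, kd
        self.integral = 0.0
        self.prev_error = 0.0

    def step(self, error, dt=0.01):
        # standard PID terms: proportional, accumulated, and rate of change
        self.integral += error * dt
        derivative = (error - self.prev_error) / dt
        self.prev_error = error
        return self.kp * error + self.ki * self.integral + self.kd * derivative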
Example #15
import cv2 as cv
import numpy as np
import os
from time import time
from vision import Vision

# Change the working directory to the folder this script is in.
# Doing this because I'll be putting the files from each video in their own folder on GitHub
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# initialize the camera
cap = cv.VideoCapture(0)

# load the trained model
cascade_limestone = cv.CascadeClassifier('cascade/cascade.xml')
# load an empty Vision class
vision_limestone = Vision(None)

loop_time = time()
while True:

    # get an updated image of the camera
    ret, screenshot = cap.read()
    gray = cv.cvtColor(screenshot, cv.COLOR_BGR2GRAY)

    if True:
        # do object detection
        rectangles = cascade_limestone.detectMultiScale(gray)
        # draw the detection results onto the original image
        detection_image = vision_limestone.draw_rectangles(
            screenshot, rectangles)
        # display the images
Example #16
    def __init__(self):
        self.vision = Vision(SCREEN_COORDS, IS_RETINA)
Example #17
import os
import time
import numpy as np
import pyautogui
from windowcapture import WindowCapture
from vision import Vision
from hsvfilter import HsvFilter

# Change the working directory to the folder this script is in.
# Doing this because I'll be putting the files from each video in their own folder on GitHub
os.chdir(os.path.dirname(os.path.abspath(__file__)))


# initialize the WindowCapture class
# initialize the Vision class
vision_limestone = Vision('cabbage.jpg')
# initialize the trackbar window
vision_limestone.init_control_gui()

# limestone HSV filter
hsv_filter = HsvFilter(0, 180, 129, 15, 229, 243, 143, 0, 67, 0)

#loop_time = time()
while True:

    # get an updated image of the game
    screenshot = pyautogui.screenshot()
    screenshot = np.array(screenshot)
    screenshot = screenshot[:, :, ::-1].copy()  # RGB (PIL) -> BGR (OpenCV)
    # pre-process the image
    processed_image = vision_limestone.apply_hsv_filter(screenshot)
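
apply_hsv_filter() comes from the Vision class, which is not shown. Assuming HsvFilter carries (hMin, sMin, vMin, hMax, sMax, vMax, ...) thresholds as the positional call above suggests, the core of such a filter is cv.inRange:

import cv2 as cv
import numpy as np

def apply_hsv_filter(bgr_image, f):
    # keep only pixels whose HSV values fall inside the filter's range;
    # the attribute names on f are assumptions about HsvFilter
    hsv = cv.cvtColor(bgr_image, cv.COLOR_BGR2HSV)
    mask = cv.inRange(hsv,
                      np.array([f.hMin, f.sMin, f.vMin]),
                      np.array([f.hMax, f.sMax, f.vMax]))
    filtered = cv.bitwise_and(hsv, hsv, mask=mask)
    return cv.cvtColor(filtered, cv.COLOR_HSV2BGR)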
Example #18
    neutral_b = cv.CascadeClassifier('cascade_models/cascade_copy.xml')
    jab = cv.CascadeClassifier('cascade_models/jab.xml')
    sheild = cv.CascadeClassifier('cascade_models/sheild.xml')

    count = MoveCounter()
    t = TimeTracker()
    labels = MoveLabels()
    tracker = Tracker()

    # This stores the locations at which images are recognized.
    move_locs = {labels.neutral_b: [], labels.jab: [], labels.shield: []}

    mario_training_filter = HsvFilter(0, 5, 0, 179, 255, 255, 0, 17, 0, 0)

    # initialize vision class
    vision = Vision('')

    while True:

        # get an updated image of the game
        # USE THIS IF YOU ARE USING SCREENCAPTURE
        # smash_screenshot = wincap.get_screenshot()

        # USE THIS IF YOU ARE READING A VIDEO FILE
        ret, smash_screenshot = cap.read()

        # apply filter to img.
        output_image = vision.apply_hsv_filter(smash_screenshot,
                                               mario_training_filter)

        move_locs[labels.neutral_b] = neutral_b.detectMultiScale(output_image)
Example #19
from fieldManager import FieldManager
from vision import Vision
from s826 import S826
from subThread import SubThread
from realTimePlot import CustomFigCanvas
import syntax
#=========================================================
# UI Config
#=========================================================
qtCreatorFile = "mainwindow.ui"
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)
#=========================================================
# Creating instances of fieldManager and Camera
#=========================================================
field = FieldManager(S826())
vision = Vision(index=1, type='firewire', guid=2672909588927744,
                buffersize=12)  # greyscale mode


# vision2 = Vision(index=2,type='firewire',guid=2672909587849792,buffersize=12)
# to use usb camera, try vision = Vision(index=1,type='usb')
# to use 1 camera only, comment out this line:    vision2 = ...
#=========================================================
# Creating instances of PS3 controller
#=========================================================
# from PS3Controller import DualShock
# joystick = DualShock()
# to disable controller comment out these 2 lines
#=========================================================
# a class that handles the signal and callbacks of the GUI
#=========================================================
class GUI(QMainWindow, Ui_MainWindow):
Example #20
    TEST_OUTPUTS = np.array(
        [
            [[[67, 40]], [[258, 43]], [[211, 160]], [[86, 146]]],
            [
                [[66, 65]],
                [[116, 50]],
                [[152, 79]],
                [[152, 121]],
                [[100, 132]],
                [[64, 116]],
            ],
        ]
    )

    def test_contour_approx(self):
        for inputs, outputs in zip(self.TEST_INPUTS, self.TEST_OUTPUTS):
            self.assertTrue(
                np.array_equal(
                    sorted(
                        list(get_corners_from_contour(inputs, len(outputs))),
                        key=lambda x: x[0][0],
                    ),
                    sorted(list(outputs), key=lambda x: x[0][0]),
                )
            )


if __name__ == "__main__":
    camera_server = Vision(test=True)
    unittest.main()
Example #21
import cv2 as cv
import numpy as np
import pyautogui
from time import time
from vision import Vision
from hsvfilter import HsvFilter
from windowCapture import WindowsCapture

wincap = WindowsCapture('League of Legends')
vision_friend = Vision('image/FREND.jpg')
vision_friend.init_color_gui()

# HSV filter to needle img
#hsv_filter = HsvFilter(0,0,0,0,0,0,0,0,0,0)
loop_time = time()
while True:

    screenshot = wincap.get_screenshot()
    # cv.imshow('computer vision', screenshot)

    # pre-process image

    # processed_image = vision_friend.apply_hsv_filter(screenshot,hsv_filter)
    #object detection

    # Processed needle image finding
    # rectangles = vision_friend.find(processed_image, threshhold=0.8)

    rectangles = vision_friend.find(screenshot, threshhold=0.8)

    # draw detection result on original image
Example #22
def test_point_is_not_close_enough():
    assert not Vision('localhost', 8081).is_point_close_enough((150, 2),
                                                               (20, 0), 5)
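
The test pins down the contract of is_point_close_enough(): (150, 2) is not within 5 units of (20, 0). A sketch of the Euclidean-distance check the test implies (the real method may differ):

import math

def is_point_close_enough(point, target, tolerance):
    # close enough = straight-line distance within tolerance
    return math.hypot(point[0] - target[0], point[1] - target[1]) <= tolerance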
Example #23
    def setUp(self):
        self.vision = Vision()
Example #24
from car import Car
from vision import Vision
from camera import Camera
import cv2
import numpy as np

if __name__ == '__main__':
    #car = Car(18,12)
    vision = Vision(0.1)
    camera = Camera()
    
    #car.reset()

    while True:
        img, depth = camera.getImages()
        img2 = img.copy()
        if depth is None:
            #car.stop()
            #car.reset()
            break

        ret = vision.processFrame(depth)
        print(ret)
        font = cv2.FONT_HERSHEY_SIMPLEX
        print(img.shape)
        if ret == 1:
            cv2.putText(img2, "BREAK", (90, 120), font, 1.5, (0, 0, 255), 5)
        elif ret == 3:
            cv2.putText(img2, "STEER LEFT", (30, 120), font, 1.5, (0, 0, 255), 5)
        elif ret == 2:
            cv2.putText(img2, "STEER RIGHT", (30, 120), font, 1.5, (0, 0, 255), 5)
Example #25
    def __init__(self):
        self.vision = Vision()
Example #26
def main():
    vision = Vision()
    controller = Controller()
    ai = AI(vision, controller, 0.510066, 0.760666, 0.35663, 0.184483, 0)
    ai.run()
Example #27
rootlog = logging.getLogger(None)

display = Display(config)

arm = UArm(serial.Serial(port=args.device, baudrate=115200, timeout=config.serial_port_timeout), config)
if arm.connect():
    log.info("Detected UArm on device {}".format(args.device))
else:
    log.fatal("Could not connect to UArm using device '{}'".format(args.device))

storage_path = args.storage_path
capture_id = args.capture_id

log.info("Using storage '{}'".format(storage_path))

vision = Vision(config)

drive = Drive("/dev/cdrom", storage_path)

calibration_markers = None
with open(args.calibration_markers_file, 'r') as f:
    calibration_markers = json.load(f)

log.info("Calibration data loaded from '{}': {}".format(args.calibration_markers_file, calibration_markers))

log.info("Starting capture")

log.info("Picking up disk from source tray")
display.msg("PICKUP SRC TRAY")

# Pickup disk
Example #28
from vision import Vision

#Constant for viewport's angle- needs to change still- in degrees
viewportAngleX = 45
viewportAngleY = 30

#Instantiate vision object to look for objects with 8 vertices
vision = Vision(8)

targets = vision.find()

while True:
    targets = vision.find()
    if len(targets) > 0:
        #print(targets[0].getAngleToCenterFromCamera(vision.resX, vision.resY, viewportAngleX, viewportAngleY))
        print(targets[0].area)


    # vision.find returns an array of vision target objects
    # targets[i].points are the vertices of the target
    # targets[i].area is the area of the target in pixels
def printTarget(target):
    print(target.points)
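
The commented-out getAngleToCenterFromCamera() call suggests a pixel-to-angle conversion using the viewport angles defined at the top. A sketch of that mapping, assuming angles vary linearly across the field of view:

def get_angle_to_center(cx, cy, resX, resY, viewportAngleX, viewportAngleY):
    # offset from the image center as a fraction of the frame, scaled to degrees
    angle_x = (cx / resX - 0.5) * viewportAngleX
    angle_y = (cy / resY - 0.5) * viewportAngleY
    return angle_x, angle_y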
Example #29
    def __init__(self):
        self.nlg = NLG(user_name=my_name)
        self.speech = Speech(launch_phrase=launch_phrase,
                             debugger_enabled=debugger_enabled)
        self.knowledge = Knowledge(weather_api_token)
        self.vision = Vision(camera=camera)
Example #30
import cv2 as cv
import numpy as np
import os
from time import time
from windowCapture import WindowCapture
import pyautogui
from vision import Vision

# Change the working directory to the folder this script is in.
# Doing this because I'll be putting the files from each video in their own folder on GitHub
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# initialize the WindowCapture class
wincap = WindowCapture('Calculator')
print(WindowCapture.list_window_names())
print(wincap.get_element_tree())
vision_limestone = Vision('seven.jpg')
print(vision_limestone)

loop_time = time()
while True:

    # get an updated image of the game
    screenshot = wincap.get_screenshot()

    points = vision_limestone.find(np.array(screenshot), 0.8, 'rectangles')

    cv.imshow('Computer Vision', np.array(screenshot))

    # debug the loop rate
    print('FPS {}'.format(1 / (time() - loop_time)))
    loop_time = time()

    # press 'q' with the output window focused to exit; waitKey() is also
    # what lets imshow() actually render each frame
    if cv.waitKey(1) == ord('q'):
        cv.destroyAllWindows()
        break