コード例 #1
0
ファイル: SWAFS.py プロジェクト: GreatGolem/simple_macro
def dungeonVictory(data):
    """Handle the dungeon victory screen: open the reward chest, screenshot
    the drop, then keep a 5/6-star rune, sell a 4-star rune, or archive a
    non-rune drop, based on pixel-color checks.

    data: coordinate/color table; each data['victory'][i] is an (x, y) pair,
    with an expected pixel color at index 2 where a check is made.
    (Python 2 snippet: uses print statements.)
    """
    click(data['victory'][2][0],data['victory'][2][1])      #victory summary
    time.sleep(2)
    click(data['victory'][2][0],data['victory'][2][1])      #chest
    time.sleep(2)
    # NOTE(review): the screen is grabbed twice (runeImg, then screen) with
    # no clicks in between -- the second grab looks redundant; confirm.
    runeImg = ImageGrab.grab()
    timestr = time.strftime("%Y%m%d-%H%M%S")
    x = data['victory'][3][0]
    y = data['victory'][3][1]
    screen = ImageGrab.grab()
    if(compare(screen.getpixel((x,y)),data['victory'][3][2])):          #checking rune
        x = data['victory'][6][0]
        y = data['victory'][6][1]
        if compare(screen.getpixel((x,y)),data['victory'][6][2]):       #checking 5 star rune
            print '5/6 star rune got'
            click(data['victory'][3][0],data['victory'][3][1])          #click get
            runeImg.save("rune_drop_records/" + timestr +".png","PNG")
        else:
            print '4 star rune got'
            click(data['victory'][7][0],data['victory'][7][1])          #click sell
            print 'selling the rune'
            time.sleep(2)
    else:
        print 'non-rune drop got'
        runeImg.save("other_drop_records/" + timestr +".png","PNG")
        click(data['victory'][5][0],data['victory'][5][1])
コード例 #2
0
ファイル: base.py プロジェクト: dickreuter/Poker
    def take_screenshot(self, initial, p):
        """Grab a screenshot of the table into ``self.entireScreenPIL``.

        initial: when True, reset the progress bar and honour the GUI
            exit/pause flags before grabbing.
        p: strategy holder; ``p.current_strategy`` is echoed to the status
            bar afterwards.
        Returns True on completion.
        """
        if initial:
            self.gui_signals.signal_status.emit("")
            self.gui_signals.signal_progressbar_reset.emit()
            if self.gui_signals.exit_thread:
                sys.exit()
            # Busy-wait while the user has paused the bot from the GUI;
            # still honour an exit request while paused.
            while self.gui_signals.pause_thread:
                time.sleep(1)
                if self.gui_signals.exit_thread:
                    sys.exit()

        time.sleep(0.1)  # small settle delay before grabbing
        config = ConfigObj("config.ini")
        control = config['control']
        if control == 'Direct mouse control':
            self.entireScreenPIL = ImageGrab.grab()
        else:
            try:
                vb = VirtualBoxController()
                self.entireScreenPIL = vb.get_screenshot_vbox()
                self.logger.debug("Screenshot taken from virtual machine")
            except Exception:  # narrowed from a bare except; still best-effort fallback
                self.logger.warning("No virtual machine found. Press SETUP to re initialize the VM controller")
                # gui_signals.signal_open_setup.emit(p,L)
                self.entireScreenPIL = ImageGrab.grab()

        self.gui_signals.signal_status.emit(str(p.current_strategy))
        self.gui_signals.signal_progressbar_increase.emit(5)
        return True
コード例 #3
0
ファイル: start.py プロジェクト: shenopkss/ice-storm
def find_question():
    red_img = ImageGrab.grab((22, 43, 37, 56))
    red_img.save("red_img.gif")
    size = red_img.size
    red_count = 0
    for x in range(size[0]):
        for y in range(size[1]):
            pix = red_img.getpixel((x, y))
            if pix[0] >= 240 and pix[1] <= 10 and pix[1] <= 10:
                red_count += 1
    if red_count > 40:
    #if True:
        im = ImageGrab.grab((57, 172, 324, 188))
        im.save("question.gif")
        result = split.split(im)
        index = 0
        length = 80
        for item in result:
            print index
            im = Image.open(item)
            im_len = im.size[0] + 25
            length += im_len
            if imghash.match(item, 'dic') == True:
                length -= im_len / 2
                break
            index += 1
            print "\n"
        print "length:%d"%length
        m.click(length, 180)
コード例 #4
0
def take_picture(direction, init_time, interval_time, path):
    """Keep taking screenshots until the screen stops changing.

    direction: key sent between shots; the literal "NoneSendKey" disables
        key sending entirely.
    init_time: seconds counted down before the first shot.
    interval_time: seconds to wait between consecutive shots.
    path: base directory; images go into path + "/spread_image/".
    """
    send_keys = direction != "NoneSendKey"

    shell = win32com.client.Dispatch("WScript.Shell")
    countdown(init_time)
    print("start to shot screen")
    first_shot = ImageGrab.grab()

    spread_path = path + "/spread_image/"
    os.mkdir(spread_path)
    first_shot.save(spread_path + "1.png")
    print("1.png")

    shot_no = 2
    while True:
        if send_keys:
            shell.SendKeys(direction)
        time.sleep(interval_time)
        ImageGrab.grab().save(spread_path + str(shot_no) + ".png")
        print(str(shot_no) + ".png")

        # Stop once the latest shot is identical to the previous one.
        if md5(spread_path + str(shot_no) + ".png") == md5(spread_path + str(shot_no - 1) + ".png"):
            break
        shot_no += 1
    print("To capture screen is complete")
コード例 #5
0
def _toggle_notification_area_icons(show_all=True, debug_img=None):
    """
    A helper function to change the 'Show All Icons' setting.
    On a successful execution the function returns the original
    state of the 'Show All Icons' checkbox.

    The helper works only for an "English" version of Windows;
    on non-English versions of Windows the 'Notification Area Icons'
    window should be accessed with a localized title.

    show_all: desired checkbox state.
    debug_img: optional base name for a JPEG screenshot saved on failure.
    """

    app = Application()
    starter = app.start(r'explorer.exe')
    class_name = 'CabinetWClass'

    def _cabinetwclass_exist():
        "Verify if at least one active 'CabinetWClass' window is created"
        l = findwindows.find_windows(active_only=True, class_name=class_name)
        return (len(l) > 0)

    # Wait up to 30s (polling every 0.5s) for the Explorer window to appear.
    WaitUntil(30, 0.5, _cabinetwclass_exist)
    handle = findwindows.find_windows(active_only=True,
                                      class_name=class_name)[-1]
    window = WindowSpecification({'handle': handle, })
    explorer = Application().Connect(process=window.ProcessID())
    cur_state = None

    try:
        # Go to "Control Panel -> Notification Area Icons"
        window.AddressBandRoot.ClickInput()
        window.TypeKeys(
                    r'control /name Microsoft.NotificationAreaIcons{ENTER}',
                    with_spaces=True,
                    set_foreground=False)
        explorer.WaitCPUUsageLower(threshold=5, timeout=40)

        # Get the new opened applet
        notif_area = explorer.Window_(title="Notification Area Icons",
                                      class_name=class_name)
        cur_state = notif_area.CheckBox.GetCheckState()

        # toggle the checkbox if it differs and close the applet
        if bool(cur_state) != show_all:
            notif_area.CheckBox.ClickInput()
        notif_area.Ok.ClickInput()
        explorer.WaitCPUUsageLower(threshold=5, timeout=40)

    except Exception as e:
        # Dump a screenshot for debugging (when requested), log, re-raise.
        if debug_img:
            from PIL import ImageGrab
            ImageGrab.grab().save("%s.jpg" % (debug_img), "JPEG")
        l = pywinauto.actionlogger.ActionLogger()
        l.log("RuntimeError in _toggle_notification_area_icons")
        raise e

    finally:
        # close the explorer window
        window.Close()

    return cur_state
コード例 #6
0
ファイル: logger.py プロジェクト: lensz/hoi3_stats
	def makeScreenshot(self):
		"""Grab the full screen to SCREENSHOT_PATH, report the action on the
		overlay status line, then preprocess the saved file.
		(Python 2 snippet: uses a print statement.)"""
		statustext = "Generate Screenshot at {}".format(self.SCREENSHOT_PATH)
		print statustext
		self.overlay.req_queue.put((overlay.Overlay.REQUEST_STATUS_UPDATE, statustext))

		ImageGrab.grab().save(self.SCREENSHOT_PATH)

		self.preprocess_image(self.SCREENSHOT_PATH)
コード例 #7
0
ファイル: tests.py プロジェクト: davidalanb/PA_home
def testTurtle(p,f):
    """Load the student's myTurtle module from folder *p* and exercise each
    drawing routine, logging any exception to the report file *f*.

    A screenshot named after the folder is saved at the end (screen capture
    uses Pillow: https://pypi.python.org/pypi/Pillow/2.1.0#downloads).
    """
    try:
        myTurtle = imp.load_source('myMath', p + '/myTurtle.py')
    except Exception as err:
        print('\t\t', err, file=f)

    screen = turtle.Screen()
    pen = turtle.Turtle()

    pen.speed(0)
    try:
        myTurtle.draw_name(screen, pen)
    except Exception as err:
        print('\t\t', err, file=f)

    pen.speed(0)
    try:
        myTurtle.draw_axes(screen, pen)
    except Exception as err:
        print('\t\t', err, file=f)

    pen.speed(0)
    try:
        myTurtle.draw_line(screen, pen, -3, 5)
    except Exception as err:
        print('\t\t', err, file=f)

    my_goto(pen, 200, 200)
    try:
        myTurtle.draw_triangle(screen, pen, 50)
    except Exception as err:
        print('\t\t', err, file=f)

    my_goto(pen, 200, -200)
    try:
        myTurtle.draw_square(screen, pen, 50)
    except Exception as err:
        print('\t\t', err, file=f)

    my_goto(pen, -200, 200)
    try:
        myTurtle.draw_pent(screen, pen, 50)
    except Exception as err:
        print('\t\t', err, file=f)

    try:
        folder_name = p.split('/')[-1]
        ImageGrab.grab().save(folder_name + '.jpg', "JPEG")
    except Exception as err:
        print('\t\t', err, file=f)

    turtle.bye()
    print('\t\tmyTurtle finished.', file=f)
コード例 #8
0
ファイル: worklogger.py プロジェクト: zhuzhouliang/WorkLogger
 def handle_image_clipboard(self):
     """Save the clipboard image into DAILY_WORK and return its file name.

     When the clipboard holds no image, a 500x500 grab of the top-left
     screen corner is used instead.  The image is only written when its
     MD5 differs from the last one saved (self.img_old_md5), avoiding
     duplicate files for an unchanged clipboard.
     """
     im = ImageGrab.grabclipboard()
     if im is None:
         im = ImageGrab.grab((0, 0, 500, 500))
     # BUGFIX: Image.tostring() was deprecated and then removed from
     # Pillow; tobytes() returns the same raw pixel data.
     md5 = hashlib.md5(im.tobytes()).hexdigest()
     img_file_name = 'Clipboard_image_' + str(md5) + '.png'
     if self.img_old_md5 != str(md5):
         im.save(os.path.join(DAILY_WORK, img_file_name), 'PNG')
         self.img_old_md5 = str(md5)
     return img_file_name
コード例 #9
0
ファイル: __init__.py プロジェクト: pywinauto/pywinauto
def save_screenshot(name):
    """
    Try to save a screenshot.

    Does nothing when the ImageGrab module could not be imported.  Prefer
    this helper over a direct `ImageGrab.grab()` call in tests so the file
    is named according to the CI configuration (SCREENSHOTMASK).
    """
    if ImageGrab is None:
        return
    ImageGrab.grab().save(SCREENSHOTMASK.format(name=name), "JPEG")
コード例 #10
0
 def connectionMade(self):
     """Connection callback: screenshot the screen to n.jpg and send its
     bytes over the transport, framed between the literal markers
     'shouroue shod' and 'tamoom shod'.  (Python 2 snippet: relies on
     str being a byte string to concatenate the file contents.)"""
     ImageGrab.grab().save("n.jpg", "JPEG")
     """basewidth = 300#GetSystemMetrics(0)
     img = Image.open('n.jpg')
     wpercent = (basewidth/float(img.size[0]))
     hsize = int((float(img.size[1])*float(wpercent)))
     img = img.resize((basewidth,hsize), PIL.Image.ANTIALIAS)
     img.save('n.jpg')"""
     # NOTE(review): the file handle from open() is never closed.
     file = open('n.jpg','rb').read()    
     self.transport.write('shouroue shod'+file+'tamoom shod')
     print 'frstdm'
コード例 #11
0
ファイル: graf.py プロジェクト: grzgrzgrz3/ggscrap
def dealer():
    """Identify which seat currently holds the dealer button.

    Each entry of *lista* is [seat_name, [[template_file, bbox], ...]];
    every template under dealer/ is compared (via comp) against a live
    grab of its bbox.  Returns the seat name on a match (comp < 300);
    otherwise dumps a full-screen BMP into er/ and returns "error".
    (Python 2 snippet: uses long-integer literals like 742L.)
    """
    lista = [['P8', [['P8a.png', (742L, 212L, 762L, 232L)], ['P8b.png', (742L, 202L, 762L, 222L)]]],['P9', [['P9a.png', (741L, 349L, 761L, 369L)], ['P9b.png', (741L, 340L, 761L, 360L)]]],['P10',[['P10a.png', (581L, 379L, 601L, 399L)],['P10b.png', (579L, 368L, 599L, 388L)]]],['P2', [['P2a.png', (257L, 381L, 277L, 401L)], ['P2b.png', (259L, 364L, 279L, 384L)]]],['P3', [['P3a.png', (98L, 347L, 118L, 367L)], ['P3b.png', (98L, 341L, 118L, 361L)]]],['P4', [['P4a.png',(99L, 208L, 119L, 228L)], ['P4b.png', (97L, 201L, 117L, 221L)]]],['P5', [['P5a.png', (259L, 145L, 279L, 165L)]]],['P6', [['P6a.png', (420L, 146L, 440L, 166L)]]],['P7', [['P7a.png', (580L, 145L, 600L, 165L)]]],['P1',[['P1a.png', (419L, 377L, 439L, 397L)], ['P1b.png', (422L, 365L, 442L, 385L)]]]]
    for dil in lista:
        numer = dil[0]      # seat name, e.g. 'P8'
        for x in dil[1]:
            box = x[1]      # screen region to grab
            name = x[0]     # reference template file name
            img1 = Image.open(os.getcwd()+'/dealer/'+name)
            img2 = ImageGrab.grab(bbox=box)
            if  comp(img1,img2) < 300:
                return numer
    # No seat matched: save the screen for later inspection.
    ImageGrab.grab().save(os.getcwd()+'/er/%s.png'%(random.randrange(1,1000)),'BMP')
    return "error"
コード例 #12
0
ファイル: main.py プロジェクト: ilbonte/screenlapse
def do_screen():
    """Save numbered full-screen captures every *waitTime* seconds until
    input_thread signals user input by appending to L.
    (Python 2 snippet: uses the `thread` module and print statements.)"""
    L = []
    i = 0
    # Watch for user input on a background thread; it appends to L.
    thread.start_new_thread(input_thread, (L,))
    while 1:
        time.sleep(waitTime)
        i = i + 1
        istr = str(i)
        ImageGrab.grab().save("imgs/" + istr + ".png", "PNG")
        print "saved" + istr
        if L:
            # Input arrived: hand control back to main() and stop capturing.
            main()
            break
コード例 #13
0
ファイル: recipe-578063.py プロジェクト: jacob-carrier/code
    def run_anim(self):
        """Morph a polygon from *polyo* to *polyd* over *trantime* frames on
        the canvas, saving each frame as im_NNNNNN.jpg in the CWD.
        (Python 2 snippet: print statement and xrange.)"""
        print os.getcwd() # REMINDER OF THE CURRENT SAVE LOCATION
        c = self.c
        # Origin and destination polygons as flat x,y coordinate arrays.
        polyo = array([34,60,226,15,419,60,359,151,91,151])
        polyd = array([205,253,296,187,388,253,353,360,239,360])
        trantime = 20 # NUMBER OF FRAMES TO WRITE (TRANSITION TIME)
        for i in xrange(trantime):
            c.delete('pol')
            # Linear interpolation between the two polygons at step i.
            ptrans = (float(i)/trantime)*polyd+(trantime-float(i))/trantime*polyo
            c.create_polygon(list(ptrans), outline='black', fill='red', tags='pol')

            self.update() # UPDATE THE CANVAS DISPLAY
            savename = 'im_{0:0>6}'.format(i)
            # NOTE(review): grabs the top-left screen region, assuming the
            # canvas window sits at (0, 0) -- confirm window placement.
            ImageGrab.grab((0,0,self.width,self.height)).save(savename + '.jpg')
コード例 #14
0
ファイル: client.py プロジェクト: Infamous-devel/placeholder
    def screenshot(self,pkt):
        """Capture the full screen and send it through *pkt* via enqueue.

        The image is written to a fixed system path, read back as raw
        bytes, and packed into the packet (state "143" marks a screenshot
        payload; length is the byte count).
        """
        # Raw string: the original relied on '\W', '\S', '\s' not being
        # recognized escapes, which is fragile (SyntaxWarning on 3.12+).
        shot_path = r"C:\Windows\System32\scrnsht.png"
        ImageGrab.grab().save(shot_path, "png")
        # with-block replaces open/read/close so the handle is released
        # even if read() raises.
        with open(shot_path, 'rb') as f:
            fread = f.read()

        pkt.state = "143"
        pkt.data = fread
        pkt.length = len(fread)

        self.enqueue(pkt)
コード例 #15
0
def main():
    """Remote-desktop server loop: accept one client, then repeatedly send
    screenshots and apply the mouse/keyboard events sent back.

    Protocol (per received string):
      'l~x~y' / 'r~x~y' -> left/right click at (x, y)
      'close'           -> set cflag and stop
      '<int>'           -> press that virtual-key code
      'start'           -> client requests the next screenshot frame
    (Python 2 snippet: print statements, str-based socket protocol.)
    """
    global cflag
    #starting thread for accepting msg from live nodes
    #thread.start_new_thread(recieve,())
    #sending msg to all nodes connected
    #connected()
    #main code
    conn,addr = sock.accept()
    print gethostname() + ' Connected by: ',gethostbyname(addr[0])
    #print '1'
    while True:
        try:
            #taking screenshot
            ImageGrab.grab().save("images\\img1.jpg", "JPEG")
            #sending image to client
            fp = open("images\\img1.jpg","rb")
            data = fp.read()#binary form reading
            fp.close()
            conn.sendall(data)
            #print '2'
            #recieving mouse coordinates or keypressed
            rec = conn.recv(1024)
            #print rec
            while rec != "start":
                if '~' in rec:
                    lr = rec[0]
                    rec = rec[1:]
                #    print '3'
                    x,y = map(int, rec.split('~'))
                    #mouse pos. set nd single click done
                    win32api.SetCursorPos((x,y))
                    if lr == 'l':
                        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x,y,0,0)
                        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x,y,0,0)
                    elif lr == 'r':
                        win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN,x,y,0,0)
                        win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP,x,y,0,0)
                elif rec == 'close':
                    cflag = 1
                    break
                elif rec:
                    keypress = int(rec)
                  #  print '4'
                    #particular key pressed
                    win32api.keybd_event(keypress,0,0,0)
                rec = conn.recv(1024)
        except:
            # NOTE(review): bare except silently retries on ANY error,
            # including socket teardown -- consider narrowing.
            continue
        if cflag == 1:
            break
コード例 #16
0
ファイル: graf.py プロジェクト: grzgrzgrz3/ggscrap
def reg(nu,name,call):
    """Register a capture box centered on the current mouse position.

    Appends [nu+name+'.png', box] to the entry for *nu* inside *call*
    (creating a new entry when absent) and saves the grabbed region as a
    BMP under add/.
    """
    mouse = PyMouse()
    px, py = mouse.position()
    # 70x30 box around the cursor.
    box = (px - 35, py - 15, px + 35, py + 15)
    entry = next((item for item in call if item[0] == nu), None)
    if entry is not None:
        entry[1].append([nu + name + '.png', box])
    else:
        call.append([nu, [[nu + name + '.png', box]]])
    ImageGrab.grab(bbox=box).save('add/%s' % (nu + name + '.png'), 'BMP')
コード例 #17
0
    def __init__(self,picture = None, xColors = None, bbox = None):
        """Wrap an existing PIL image, or take a fresh screen grab.

        picture: PIL image to wrap; when None a new screenshot is taken
            (restricted to *bbox* when given).
        xColors: optional color list forwarded to _createColorsData.
        bbox: ((x1, y1), (x2, y2)) screen region to grab.
        """
        # Avoid the shared mutable-default pitfall of the original
        # `xColors = []`; behavior for all callers is unchanged.
        if xColors is None:
            xColors = []
        # Identity tests replace the original `== None` / `!= None`.
        if picture is None:
            if bbox is not None:
                x1, y1, x2, y2 = bbox[0][0], bbox[0][1], bbox[1][0], bbox[1][1]
                self.picture = ImageGrab.grab(bbox=(x1,y1,x2,y2))
            else:
                self.picture = ImageGrab.grab()
        else: self.picture = picture

        self.name = "ScreenShot"
        self.data = self.picture.load()   # pixel-access object
        self.res = self.picture.size
        self.colors = {}

        self._createColorsData(xColors)
コード例 #18
0
ファイル: rem.py プロジェクト: mrwhite24/ReAM
def screenshot():
    """Return the foreground window as a base64-encoded PNG string, or
    None when called again within SCREENSHOT_THRESHOLD_TIME seconds."""
    import win32gui
    from PIL import ImageGrab
    import base64
    from io import BytesIO

    global last_screenshot_time
    # Rate limit: at most one screenshot per threshold window.
    if time.time() - last_screenshot_time < SCREENSHOT_THRESHOLD_TIME:
        return None
    last_screenshot_time = time.time()

    window_rect = win32gui.GetWindowRect(win32gui.GetForegroundWindow())
    buf = BytesIO()
    ImageGrab.grab(window_rect).save(buf, 'png')
    return base64.b64encode(buf.getvalue()).decode()
コード例 #19
0
ファイル: 1.py プロジェクト: Aiah/doitpy
def Pick():
    """Answer the current question (always clicks option A, then submits),
    screenshot the result, and record which option the tips box reveals as
    correct into the HashS/HashP tables keyed by the question hashes.
    (Python 2 snippet: print statement.)"""
    time.sleep(0.5)         # slow down; this value can be increased
    clickClient(getbrow,(465,580))          ## click option A
    clickClient(getbrow,(505,715))          ## submit the answer
    time.sleep(1.5)         # wait for the result to render

    global fullsc
    fullsc = ImageGrab.grab()
    # Hash the two question regions and the tips (revealed answer) box.
    Qmd5 = picmath(Qbox,'lxc/Qbox.jpg')
    Q2md5 = picmath(Q2box,'lxc/Qbox2.jpg')
    Tipsmd5 = picmath(Tipsbox,'lxc/Tips.JPG')

    # Store the hash of whichever option the tips box says is correct.
    if Tipsmd5 == Amd5:
        HashS[Qmd5] = picmath(Abox,'lxc/A.jpg')
        HashP[Q2md5] = picmath(Abox,'lxc/A.jpg')

    elif Tipsmd5 == Bmd5:
        HashS[Qmd5] = picmath(Bbox,'lxc/B.jpg')
        HashP[Q2md5] = picmath(Bbox,'lxc/B.jpg')

    elif Tipsmd5 == Cmd5:
        HashS[Qmd5] = picmath(Cbox,'lxc/C.jpg')
        HashP[Q2md5] = picmath(Cbox,'lxc/C.jpg')

    elif Tipsmd5 == Dmd5:
        HashS[Qmd5] = picmath(Dbox,'lxc/D.jpg')
        HashP[Q2md5] = picmath(Dbox,'lxc/D.jpg')
    else:
        print 'no answer! no tips! cant catch!'
コード例 #20
0
ファイル: unidice.py プロジェクト: m-sakano/unidice
 def grabWindow(self):
     """Capture the screen region typed into the line edit.

     The text is parsed as 'left,top,right,bottom'; the grab is stored in
     self.img, scaled by *zoom*.
     """
     area = self.lineEdit.text().encode('utf-8').split(',')
     self.img = ImageGrab.grab((int(area[0]),int(area[1]),int(area[2]),int(area[3])))
     width = int(area[2]) - int(area[0])
     height = int(area[3]) - int(area[1])
     zoom = 1    # enlarging may improve downstream recognition accuracy
     # BUGFIX: Image.resize() returns a NEW image; the original discarded
     # the result, so self.img was never actually resized.
     self.img = self.img.resize((width*zoom, height*zoom),resample=Image.LANCZOS)
コード例 #21
0
ファイル: helibot.py プロジェクト: dgriff/helibot
def _testgrab():
    """Manual smoke test: locate helisig.png on screen and park the cursor
    at a fixed offset from the match, printing the elapsed search time.
    (Python 2 snippet: print statement; time.clock() was removed in 3.8.)"""
    time.sleep(2)       # give the user time to bring the game to front
    xii = time.clock()
    win32api.SetCursorPos((0,0))
    x,y = find_img(ImageGrab.grab(), Image.open("helisig.png"))
    win32api.SetCursorPos((x - 274, y - 193))
    print time.clock() - xii
コード例 #22
0
ファイル: pydqxhelper.py プロジェクト: yasushiyy/pydqxhelper
 def capture(self):
     """Capture the foreground window and return it as a BGR numpy array
     (OpenCV channel order), or None when the grab produced nothing."""
     rect = win32gui.GetWindowRect(win32gui.GetForegroundWindow())
     shot = ImageGrab.grab(rect)
     if not shot:
         return None
     return np.array(shot)[:, :, ::-1]  # reverse channels: RGB -> BGR
コード例 #23
0
ファイル: 1.py プロジェクト: Aiah/doitpy
def AutoAD():
    """Post the prepared message ten times, answer the current question,
    then advance to the next question / chapter, recursing until the
    whole chapter is finished."""
    # BUGFIX: declare fullsc global (as Pick() does at its screenshot)
    # so the fresh grab below is the one picmath() reads; previously the
    # assignment created an unused local and picmath saw a stale frame.
    global fullsc

    for _ in range(10):
        clickClient(getbrow,(265,888))           # "I want to speak"
        time.sleep(0.3)
        clickClient(getbrow,(256,775))           # focus the message box
        keyboard()          # SendMessage types the prepared text
        time.sleep(0.1)
        clickClient(getbrow,(212,892))           # publish
        time.sleep(0.3)

    Answer()
    Pmd5 = picmath(Pcbox,'lxc/Process.jpg')
    if Pmd5 != Pcmd5 :
        clickClient(getbrow,(655,770))          # next question
        AutoAD()            # keep looping through the questions
    else:
    # move on to the next section
        clickClient(getbrow,(1020,865))          # hand in the paper
        time.sleep(1)
        clickClient(getbrow,(560,680))          # confirm the "unfinished" prompt
        time.sleep(1.5)

        fullsc = ImageGrab.grab()
        Buttunmd5 = picmath(Button,'lxc/Button.JPG')
        if Buttunmd5 == ButtunT:
            clickClient(getbrow,(490,720))          # practice the next topic
            AutoAD()
        else:
            logging.info('all this chapter finish!')
コード例 #24
0
def take_screenshot():
    """Capture the screen and return it base64-encoded, or None on failure.

    Uses PIL's ImageGrab on Windows, the `screencapture` CLI on macOS,
    and pyscreenshot everywhere else.
    """
    # NOTE: tempfile.mktemp is race-prone; kept for compatibility.
    tmp_file_path = tempfile.mktemp(suffix='.png')

    system = platform.system()
    if system == "Windows":
        ImageGrab.grab().save(tmp_file_path)
    # BUGFIX: the Darwin test was a bare `if`, so on Windows its `else`
    # branch also ran pyscreenshot right after the ImageGrab save.
    elif system == "Darwin":
        run_command(["screencapture", tmp_file_path])
    else:
        pyscreenshot.grab_to_file(tmp_file_path)

    try:
        with open(tmp_file_path, "rb") as image_file:
            img = base64.b64encode(image_file.read())
        return img
    except OSError:  # narrowed from a bare except: only file I/O can fail here
        return None
コード例 #25
0
ファイル: Pickit_k0.2.py プロジェクト: Aiah/doitpy
def Pickit():
    """Screenshot the answered question, record which option the tips box
    reveals as correct into DictS/DictP (keyed by the question hashes),
    then click through to the next question.
    (Python 2 snippet: print statement.)"""
    time.sleep(1)
    global fullsc
    fullsc = ImageGrab.grab()

    # Hash the two question regions and the tips (revealed answer) box.
    Qmd5 = picmath(Qbox,'kmy/Qbox.jpg')
    Q2md5 = picmath(Q2box,'kmy/Qbox2.jpg')
    Tipsmd5 = picmath(Tipsbox,'kmy/Tips.JPG')

    # Store the hash of whichever option the tips box says is correct.
    if Tipsmd5 == Amd5:
        DictS[Qmd5] = picmath(Abox,'kmy/A.jpg')
        DictP[Q2md5] = picmath(Abox,'kmy/A.jpg')

    elif Tipsmd5 == Bmd5:
        DictS[Qmd5] = picmath(Bbox,'kmy/B.jpg')
        DictP[Q2md5] = picmath(Bbox,'kmy/B.jpg')

    elif Tipsmd5 == Cmd5:
        DictS[Qmd5] = picmath(Cbox,'kmy/C.jpg')
        DictP[Q2md5] = picmath(Cbox,'kmy/C.jpg')

    elif Tipsmd5 == Dmd5:
        DictS[Qmd5] = picmath(Dbox,'kmy/D.jpg')
        DictP[Q2md5] = picmath(Dbox,'kmy/D.jpg')
    else:
        print "no answer! no tips! cant catch!"

    time.sleep(0.3)
    clickClient(getbrow,(340,565))          ## next question
コード例 #26
0
ファイル: pombo.py プロジェクト: kinaesthesia/pombo
def screenshot(filename):
    '''
        Takes a screenshot and returns the path to the saved image
        (in TMP). None if could not take the screenshot.
    '''

    if not CONFIG['screenshot']:
        LOG.info('Skipping screenshot.')
        return None

    temp = tempfile.gettempdir()
    LOG.info('Taking screenshot')
    filepath = '{0}{1}{2}_screenshot.jpg'.format(temp, SEP, filename)
    user = current_user()
    if not user:
        LOG.error('Could not determine current user. Cannot take screenshot.')
        return None

    if OS == 'Windows':
        try:
            # NOTE(review): ImageGrab.grab()'s first argument is a bbox,
            # but Image.WEB is a palette constant -- this looks wrong;
            # confirm against the PIL version this was written for.
            img = ImageGrab.grab(Image.WEB)
            img.save(filepath, 'JPEG', quality=80)
        except IOError as ex:
            LOG.error(ex)
    else:
        # Non-Windows: run the user-configured screenshot command with
        # <user>/<filepath> placeholders substituted.
        cmd = CONFIG['screenshot']
        cmd = cmd.replace('<user>', user)
        cmd = cmd.replace('<filepath>', filepath)
        runprocess(cmd, useshell=True)
    if not os.path.isfile(filepath):
        return None
    return filepath
コード例 #27
0
ファイル: main.py プロジェクト: raduangelescu/stackem_bot
    def do_recognition(self):
        """Rebuild self.board by grabbing the game area and classifying each
        tile; brown tiles are also collected in self.browns_affected.
        (Python 2 snippet: xrange.)"""
        self.board = []
        self.browns_affected = []
        # Park the cursor in an idle spot so it does not overlap any tile.
        win32api.SetCursorPos((CONFIG.SCREEN_PADDING[W] + CONFIG.MOUSE_OFFSETS_IDLE[X],
                               CONFIG.SCREEN_PADDING[H] - CONFIG.MOUSE_OFFSETS_IDLE[Y]))

        screen_shot = ImageGrab.grab(self.box)
        screen_shot.save("real.png",'PNG')

        for x in xrange(0, CONFIG.GAME_BOARD_SIZE[W]):

            new_row = []

            for y in xrange(0, CONFIG.GAME_BOARD_SIZE[H]):

                #1. Cut the tile
                tile = self.utils.get_tile_from_image(screen_shot, x, y)
                #2. Classify the tile
                class_id = self.reco.predict(tile)

                # -1 means the classifier matched nothing for this tile.
                if class_id != -1:
                    ####print "({0},{1}) --> {2}".format(x, y, CONFIG.TEMPLATE_NAMES[class_id])
                    new_row.append(class_id)
                    if class_id == CONFIG.IDX_BROWN:
                        self.browns_affected.append((x, y))
                ####else:
                    ####print "({0},{1}) --> NIMIC".format(x, y)

            self.board.append(new_row)
        self.utils.print_board_image(self.board, "reco.png")
コード例 #28
0
ファイル: filesystem.py プロジェクト: diwa-aalto/diwacs
def screen_capture(path, node_id):
    """
    Take a screenshot and store it in the project folder.

    :param path: Path to the project folder.
    :type path: String

    :param node_id: NodeID
    :type node_id: Integer

    """
    try:
        shot = ImageGrab.grab()
        shot.thumbnail((800, 600), Image.ANTIALIAS)
        target_dir = os.path.join(path, 'Screenshots')
        try:
            os.makedirs(target_dir)
        except OSError:
            # Directory already exists -- keep going.
            pass
        # File name: <event>_<node>_<ddmmYYYYHHMMSS>.png
        name = '{0}_{1}_{2}.png'.format(
            controller.get_latest_event_id(),
            node_id,
            datetime.datetime.now().strftime('%d%m%Y%H%M%S'),
        )
        shot.save(os.path.join(target_dir, name), format='PNG')
    except (IOError, OSError) as excp:
        LOGGER.exception('screen_capture exception: {0!s}'.format(excp))
コード例 #29
0
ファイル: grabber.py プロジェクト: Placinta/infographics
    def initGrab(self):
        """Start a screen recording: write the first frame to out3.avi and
        arm the per-frame capture timer (25 fps) plus a single-shot stop
        timer that fires after self.total_time ms.  (Legacy OpenCV `cv`
        API and PIL image.tostring().)"""

        image = ImageGrab.grab(self.geometry)
        # Wrap the PIL image in an old-style OpenCV header and swap the
        # channel order for the video writer.
        cv_im = cv.CreateImageHeader(image.size, cv.IPL_DEPTH_8U, 3)

        cv.SetData(cv_im, image.tostring())
        cv.CvtColor(cv_im, cv_im, cv.CV_RGB2BGR)

        fourcc = cv.CV_FOURCC('D','I','V','X')
        fps = 25
        width, height = cv.GetSize(cv_im)
        #print width, height
        self.writer = cv.CreateVideoWriter('out3.avi', fourcc, fps, (int(width), int(height)), 1)

        cv.WriteFrame(self.writer, cv_im)

        self.frames_count = 1

        # Grab a new frame every 40 ms (1000/25).
        timer = QtCore.QTimer()
        time_interval = 1000 / 25
        timer.setInterval(time_interval)
        timer.timeout.connect(self.grabFrame)
        timer.start()
        self.timer = timer

        # Single-shot timer that ends the capture after total_time ms.
        self.stopTimer = QtCore.QTimer()
        self.stopTimer.setInterval(self.total_time)
        self.stopTimer.timeout.connect(self.stopCapture)
        self.stopTimer.setSingleShot(True)
        self.stopTimer.start()
コード例 #30
0
ファイル: Neo2DEPR.py プロジェクト: JDongian/Neo-Helper
def findImg(rgb_im, fuzz=0, forward=True,
            region=((0, 0), (1366, 768)), ref=(0, 0)):
    '''
    Find the first match on the screen for
    the given image in the specified region.

    rgb_im:  PIL image to search for.
    fuzz:    tolerance forwarded to matches().
    forward: scan top-left -> bottom-right when True, else the reverse.
    region:  ((x1, y1), (x2, y2)) search rectangle, offset by *ref*.
    ref:     (dx, dy) added to both region corners.
    Returns the absolute (x, y) of the first match, or (-1, -1).

    Note: tuple default replaces the original mutable list default
    (shared across calls); indexing behaviour is unchanged.  The unused
    `findPx` pixel computed in each branch was removed.
    '''
    box = (region[0][0]+ref[0], region[0][1]+ref[1],
           region[1][0]+ref[0], region[1][1]+ref[1])
    search = ImageGrab.grab().crop(box)
    if forward:#Search from the top left corner.
        for i in range(search.size[0]):
            for j in range(search.size[1]):
                #i,j = relative
                #x,y = absolute
                x = i+box[0]
                y = j+box[1]
                win32api.SetCursorPos((x+2,y+2))  # visual progress cue
                if matches(rgb_im, fuzz, search, (i,j), True):
                    return (x,y)
    else: #Search from the bottom right corner.
        for i in reversed(range(search.size[0])):
            for j in reversed(range(search.size[1])):
                x = i+box[0]
                y = j+box[1]
                win32api.SetCursorPos((x+2,y+2))
                if matches(rgb_im, fuzz, search, (i,j), True):
                    return (x,y)
    return (-1,-1)
コード例 #31
0
from PIL import ImageGrab
import numpy as np
import cv2
while True:
    img = ImageGrab.grab(
        bbox=(100, 10, 400, 780)
    )  # bbox is (left, upper, right, lower) in screen coordinates -- the
    # original comment's "x,y,width,height" description was incorrect
    img_np = np.array(img)  # PIL image -> numpy array
    # NOTE(review): grab() returns RGB; COLOR_BGR2GRAY on RGB data still
    # yields a gray image but with swapped R/B channel weights -- confirm.
    frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2GRAY)
    cv2.imshow("test", frame)
    cv2.waitKey(0)  # blocks for a keypress on every frame
    cv2.destroyAllWindows()
コード例 #32
0
ファイル: GetDisplay.py プロジェクト: boy1089/Macro
 def grabDisplay(self):
     """Grab the full screen and store the PIL image on self.img."""
     self.img = ImageGrab.grab()
コード例 #33
0
def grab_game_window():
    """Return a PIL screenshot of the fixed 1024x768 game window area."""
    capture_box = (0, 0, 1024, 768)
    #im.save('save.png', 'PNG')
    return ImageGrab.grab(capture_box)
コード例 #34
0
ファイル: ScreenZMQmp2.py プロジェクト: uutzinger/ScreenCast
queue = Queue()  # type: Queue
# BUGFIX: Process args must be a tuple of arguments; `args=(queue)` is
# just `queue` itself (no trailing comma), which breaks process startup.
Process(target=publish, args=(queue,)).start()


# get the host name
hostName = socket.gethostname()

# initialize frame/FPS bookkeeping in tick units
numFrames = 0
lastFPSTime     = cv2.getTickCount()
tickFrequency   = cv2.getTickFrequency()
tickDeltaReport = tickFrequency*5      # ticks for 5 secs
tickDeltaFrame  = tickFrequency/fpsMax # ticks between frames

framePrevious   = np.array(ImageGrab.grab(), dtype=np.uint8)
lastTime        = cv2.getTickCount()
currentTime     = lastTime

while True:
    # take a screenshot of the screen
    currentTime = cv2.getTickCount()
    if (currentTime - lastTime) >= tickDeltaFrame:
        lastTime = currentTime
        # frame=sct.grab(sct.monitors[monitor])
        screen = ImageGrab.grab() #200ms 
        frame = np.array(screen, dtype=np.uint8) # 70ms
        # frame  = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) # 7ms
        # is there enough change on the screen to warrant sending new image?
        frameDelta = cv2.absdiff(frame, framePrevious) # 25ms
        # e1 = cv2.getTickCount()
コード例 #35
0
    pyautogui.keyDown('down')
    time.sleep(0.15)
    pyautogui.keyUp('down')
    print("duck")


time.sleep(3)  # give the user time to switch to the game window
# Reference pixel colours used to distinguish sprites from the background.
light = (83, 83, 83)
dark = (172, 172, 172)
black = (0, 0, 0)
white = (255, 255, 255)
# a = pyautogui.position()
# print(ImageGrab.grab().getpixel(a))

while True:
    image = ImageGrab.grab()
    background = image.getpixel((300, 250))
    a = image.getpixel((755, 285))
    b = image.getpixel((750, 285))
    c = image.getpixel((760, 285))
    d = image.getpixel((745, 285))

    e = image.getpixel((735, 230))
    f = image.getpixel((745, 230))
    g = image.getpixel((750, 230))
    h = image.getpixel((755, 230))

    i = image.getpixel((735, 245))
    j = image.getpixel((745, 245))
    k = image.getpixel((750, 245))
    l = image.getpixel((755, 245))
コード例 #36
0
y2 = centerY + bufferY

w = bufferX*2
h = bufferY*2

timer = 0


#center = None


# keep looping
while (1):
        #createRectAHK(x1, y1, bufferX*2, bufferY*2, False)
        #box = (0, 0, bufferX*4, bufferY*4)
        img = ImageGrab.grab(bbox=(x1, y1, x2, y2))#.crop(box) #x, y, w, h
        #img = ImageGrab.grab(bbox=(0, 0, 900, 500))#.crop(box) #x, y, w, h
        img_np = np.array(img)
        frame = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)
        #frame = imutils.resize(frame, width=600)
        hsv = cv2.cvtColor(img_np, cv2.COLOR_BGR2HSV)

        
        # construct a mask for the color "green", then perform
        # a series of dilations and erosions to remove any small
        # blobs left in the mask
        mask = cv2.inRange(hsv, colorLower, colorUpper)
        #mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)

        # find contours in the mask and initialize the current
コード例 #37
0
#Run program!#
#------------#

while True :

    # 전체 delay 설정
    time.sleep(0.3)

    # quit on ESC button
    if cv2.waitKey(1) & 0xFF == 27:  # Esc pressed
        break

    # ImageGrab.grab 함수는 화면 특정 좌표의 영역을 캡쳐한 이미지를 return 한다.
    # Divide constant 는 처음에 roi 를 선택할때 축소된 비율로, 다시 확대시켜 주는 작업일 뿐임.
    mapleimage_original = ImageGrab.grab((bbox[0]*DIVIDE_CONSTANT, bbox[1]*DIVIDE_CONSTANT,
                                        bbox[0]*DIVIDE_CONSTANT + bbox[2]*DIVIDE_CONSTANT,
                                        bbox[1]*DIVIDE_CONSTANT + bbox[3]*DIVIDE_CONSTANT))
    mapleimage_original.save('JanghooModule_RunWithMapleGUI/tmpmaplescreenshot/maple_current.jpg')
    mapleimage_original = cv2.imread('JanghooModule_RunWithMapleGUI/tmpmaplescreenshot/maple_current.jpg')

    inputimage = mapleimage_original.copy()
    inputimage = cv2.resize(inputimage, dsize=(80,80))
    inputimage = image.img_to_array(inputimage)
    inputimage = inputimage.reshape((1,) + inputimage.shape)

#    test_num = test_num[:, :, 0]
#    test_num = (test_num > 125) * test_num
#    test_num = test_num.astype('float32') / 255.
#    test_num = test_num.reshape((1, 28, 28, 1))

コード例 #38
0
 def _pil_screenshot(self, path):
     """Take a full-screen grab and write it to *path* as a JPEG."""
     screen = ImageGrab.grab()
     screen.save(path, 'JPEG')
コード例 #39
0
ファイル: trigger.py プロジェクト: Jolken/csgoTrigger
def takeCrosshair():
    """Return a 20x20 numpy array of the screen centered on the crosshair."""
    crosshair_box = (HALF_WIDTH - 10, HALF_HEIGHT - 10,
                     HALF_WIDTH + 10, HALF_HEIGHT + 10)
    return np.array(ImageGrab.grab(bbox=crosshair_box))
コード例 #40
0
ファイル: Inhapilot.py プロジェクト: qetwr1324/G_project
from PIL import ImageGrab
import numpy as np
import cv2
import time
import os
import keras


modelpath = './mymodel.h5'
def get_model():
    """Load and return the pre-trained Keras model from *modelpath*."""
    return keras.models.load_model(modelpath)

def transform_image(image):
    """Crop the top 40% off a frame and resize it to the 200x66 network input.

    image: HxWxC numpy array.  Generalized from the original hard-coded
    1080-row assumption: the crop now uses the actual frame height, which
    is byte-identical for 1080-row input.
    """
    height = image.shape[0]
    image = image[int(height * 0.4):height, :]
    image = cv2.resize(image, (200, 66))
    return image


if __name__ == "__main__":
    model = get_model()
    while(True):
        curTime = time.time()
        img = ImageGrab.grab(bbox=(0, 648, 1920, 1080))
        img = img.resize((200, 66))
        img=cv2.cvtColor(np.array(img),cv2.COLOR_BGR2RGB)
        wheel = model.predict(np.expand_dims(img,axis=0))
        print(wheel)


コード例 #41
0
import time
from PIL import ImageGrab
import tkinter


import HTMLTestRunner

# Automates filling a jinshuju.net form twice, screenshotting each submission.
# NOTE(review): `webdriver` (selenium) is not imported in the visible snippet —
# presumably `from selenium import webdriver` exists elsewhere; confirm.
path = "C:\Program Files (x86)\Google\Chrome\Application\chromedriver.exe"
driver = webdriver.Chrome(executable_path=path)  # browser object that everything below drives
driver.get('https://templates.jinshuju.net/detail/Dv9JPD')     # open the form template page
time.sleep(2)
# The form lives inside an iframe; locate it and switch the driver context into it.
iframe = driver.find_elements_by_xpath('//*[@id="__next"]/div[1]/div/div[2]/div[2]/div[1]/div[1]/div[1]/div/iframe')[0]
driver.switch_to.frame(iframe)
driver.find_elements_by_xpath('//*[@id="new_entry"]/div[2]/div/div[4]/div[1]/div[2]/div/div[2]/div/label[2]/div/div[1]/i')[0].click()
time.sleep(2)
# Screenshot the first filled-in state, then submit.
img = ImageGrab.grab(bbox=(0, 0, 1024, 1024))
img.save('D:\MSwork\python\p1.jpg')
driver.find_element_by_xpath('//*[@id="new_entry"]/div[2]/div/div[5]/a[2]').click()
time.sleep(2)
driver.switch_to.default_content()


# Second pass: re-enter the iframe and fill the text fields directly.
driver.switch_to.frame(iframe)
driver.find_element_by_id('entry_field_18').send_keys("2020/3/12")  # the date input must be targeted first
driver.find_element_by_id('entry_field_19').send_keys("自动化")
driver.find_element_by_id('entry_field_20').send_keys("13888888888")
time.sleep(3)
# Screenshot the second filled-in state, then submit again.
img = ImageGrab.grab(bbox=(0, 0, 1024, 1024))
img.save('D:\MSwork\python\p2.jpg')
driver.find_element_by_xpath('//*[@id="new_entry"]/div[2]/div/div[5]/a[2]').click()
time.sleep(2)
コード例 #42
0
ファイル: adb.py プロジェクト: Gipn0za/worldFlipperATS
 def window_capture(self, hwnd, fileName):
     """Capture the window *hwnd*, scale it to the configured screen size,
     save it to *fileName*, and cache the scaled image on ``self.screen_Hot``.
     """
     game_Rect = win32gui.GetWindowRect(int(hwnd))
     src_Image = ImageGrab.grab(game_Rect)
     # BUG FIX: the original called ``resize(self, screen_Size, ...)`` — passing
     # ``self`` as the size tuple, which raises at runtime. ``screen_Size`` was
     # presumably meant to be the instance attribute (comma typo for the dot).
     src_Image = src_Image.resize(self.screen_Size, Image.ANTIALIAS)
     src_Image.save(fileName)
     self.screen_Hot = src_Image
コード例 #43
0
# Lookup table mapping lit seven-segment flags (segments A..G) to a digit.
# The digit '1' is handled separately via a single probe pixel.
SEGMENT_DIGITS = {
    (1, 1, 1, 1, 1, 1, 0): 0,
    (1, 1, 0, 1, 1, 0, 1): 2,
    (1, 1, 1, 1, 0, 0, 1): 3,
    (0, 1, 1, 0, 0, 1, 1): 4,
    (1, 0, 1, 1, 0, 1, 1): 5,
    (1, 0, 1, 1, 1, 1, 1): 6,
    (1, 1, 1, 0, 0, 0, 0): 7,
    (1, 1, 1, 1, 1, 1, 1): 8,
    (1, 1, 1, 1, 0, 1, 1): 9,
}

# Probe pixels (row, col) for segments A..G of each digit position, plus the
# extra probe that is lit only when the glyph is a narrow '1'.
DIGIT1_SEGS = [(2, 6), (8, 12), (17, 12), (20, 7), (16, 3), (6, 3), (11, 6)]
DIGIT1_ONE = (16, 9)
DIGIT2_SEGS = [(2, 21), (7, 27), (16, 27), (20, 22), (17, 18), (7, 18), (11, 22)]
DIGIT2_ONE = (16, 23)


def decode_seven_segment(mask, seg_probes, one_probe):
    """Decode one seven-segment digit from a binary mask (module-internal helper).

    mask       -- 2-D uint8 array in which lit pixels have the value 255
    seg_probes -- (row, col) sample point for each of segments A..G
    one_probe  -- sample point that is lit only for the digit '1'

    Returns the decoded digit; unknown patterns fall back to 0, matching the
    original code's behaviour.
    """
    if mask[one_probe] == 255:
        return 1
    lit = tuple(int(mask[p] == 255) for p in seg_probes)
    return SEGMENT_DIGITS.get(lit, 0)


def getspeed():
    """Read the two-digit speed readout from a fixed region of the screen.

    Grabs bbox (590, 486, 640, 512) — note PIL bboxes are
    (left, top, right, bottom), not (x, y, w, h) as the original comment
    claimed — thresholds the bright display pixels, smooths them with a 3x3
    box filter, and decodes both digits by sampling fixed probe pixels.
    """
    frame = ImageGrab.grab(bbox=(590, 486, 640, 512))
    frame_np = np.array(frame)
    frame_BW = cv2.cvtColor(frame_np, cv2.COLOR_BGR2GRAY)
    out = cv2.inRange(frame_BW, 200, 256)

    # 3x3 box blur followed by a re-threshold fills small gaps in the segments.
    kernel = np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]]) / 9
    temp = cv2.filter2D(out, -1, kernel)
    mask = cv2.inRange(temp, 50, 256)  # renamed from `input` (shadowed a builtin)

    digit1 = decode_seven_segment(mask, DIGIT1_SEGS, DIGIT1_ONE)
    digit2 = decode_seven_segment(mask, DIGIT2_SEGS, DIGIT2_ONE)

    return digit1 * 10 + digit2
コード例 #44
0
import numpy as np
from PIL import ImageGrab
import cv2
# Record the screen to a video file until Esc is pressed.
fourcc = cv2.VideoWriter_fourcc(*"XVID")
size = (ImageGrab.grab()).size  # (width, height) of the screen
output = cv2.VideoWriter("output.mp4", fourcc, 5.0, size)
while True:
    img = np.array(ImageGrab.grab())  # RGB frame from PIL
    frame = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # channel-swap for OpenCV (BGR)
    cv2.imshow("Screen", frame)
    # BUG FIX: the original wrote the unconverted RGB array (`img`), so the
    # saved video had its red and blue channels swapped relative to the preview.
    output.write(frame)
    if cv2.waitKey(1) == 27:  # Esc quits
        break
output.release()
cv2.destroyAllWindows()
コード例 #45
0
def capture_direction():
    """Capture the direction-indicator region (``direction_bbox``) as an ndarray."""
    shot = ImageGrab.grab(bbox=direction_bbox)
    return np.array(shot)
コード例 #46
0
from PIL import ImageGrab
import numpy as np
pil_img = ImageGrab.grab()  # full-screen screenshot as a PIL Image
opencv_img = np.array(pil_img)  # ndarray view for OpenCV processing (channels stay RGB)
コード例 #47
0
def doLoop(sc):
    """Scheduler callback (Python 2): re-arms itself every 0.2 s, periodically
    re-detects the external display and letterbox offset, and streams averaged
    strip colours to the ambilight controller over ``sock``."""
    global iterator, enabled, ourScreenWidth, lastMillis, ourOffset

    iterator += 1

    # Re-schedule this function so the loop keeps running.
    sc.enter(0.2, 1, doLoop, (sc, ))

    # Every 100th tick: re-check the monitor and recompute the letterbox offset.
    if (iterator == 100):

        if (enabled):

            print "checking monitor..."
            iterator = 0
            # On macOS, stay enabled only while a TOSHIBA/ARGLEY display is attached.
            if (os.name == "posix"):
                if (os.system(
                        "system_profiler SPDisplaysDataType | grep -Ei 'toshiba|argley'"
                ) > 0):
                    enabled = 0
                    print "TOSHIBA or ARGLEY not found. Sleeping..."
                else:
                    enabled = 1
                    print "screen connected."
            else:
                enabled = 1

            # Refresh the cached screen width and the letterbox offset.
            bigImage = ImageGrab.grab(bbox=(0, 0, ourScreenWidth, 440))
            ourScreenWidth = bigImage.size[0]
            print "screen size: " + str(ourScreenWidth)
            getOffset(bigImage)
            print "letterbox offset: " + str(ourOffset)

        else:

            # Disabled: still track the offset so re-enabling starts correct.
            bigImage = ImageGrab.grab(bbox=(0, 00, ourScreenWidth, 440))
            getOffset(bigImage)
            print(ourOffset)

            print "sleeping..."

    if (enabled):

        colors = [0 for i in xrange(ourStrip)]
        colorCommand = "WINKYBASESA9"

        try:
            # Grab the 100-px-tall band below the letterbox offset and average
            # each strip segment's colour into the outgoing command.
            ourImage = ImageGrab.grab(bbox=(0, ourOffset, ourScreenWidth,
                                            ourOffset + 100))

            for chunk in reversed(range(len(colors))):
                colors[chunk] = getChunk(chunk, ourImage)
                if colors[chunk][3] == 0:
                    print "screen width changed, got transparent area"
                    # NOTE(review): `ourWidth` is assigned but never read —
                    # presumably `ourScreenWidth` was intended; this retry call
                    # also omits getChunk's image argument. Confirm upstream.
                    ourWidth = ImageGrab.grab().size[0]
                    colors[chunk] = getChunk(chunk)
                colorCommand = colorCommand + encodeColor(
                    colors[chunk][0], colors[chunk][1], colors[chunk][2])

        # NOTE(review): bare except silently absorbs every capture error.
        except:
            print "capture yacked: ", sys.exc_info()[0]

            #	fileOut = open("/tmp/ambilight","w+")
    #	fileOut.write(colorCommand)
    #	fileOut.close()

        # Escape embedded quotes and terminate the command with CRLF.
        colorCommand = colorCommand.replace("\"", r"\"") + "\r\n"

        #	 	print "echo \"" + colorCommand + "\">/var/local/nrf24/out/ambilight"
        try:
            print(colorCommand)
            sock.send(colorCommand)
            # print str(iterator)+" " + str(ourOffset)

    #		scp.put("/tmp/ambilight","/var/local/nrf24/out/ambilight")
        except socket.error as e:
            # On send failure, try to reconnect for the next tick.
            print "connection failed"
            sock.connect((TCP_IP, TCP_PORT))
コード例 #48
0
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 28 23:01:09 2018

@author: Administrator
"""
"""python + opencv 实现屏幕录制_by-_Zjh_"""
from PIL import ImageGrab
import numpy as np
import cv2
# Record the screen to test.avi until 'q' is pressed.
p = ImageGrab.grab()  # one grab just to learn the screen dimensions
k = np.zeros((200, 200), np.uint8)  # unused in the visible snippet; kept as-is
a, b = p.size  # screen width, height
fourcc = cv2.VideoWriter_fourcc(*'XVID')  # codec
video = cv2.VideoWriter('test.avi', fourcc, 16,
                        (a, b))  # output file at 16 fps
while True:
    im = ImageGrab.grab()
    imm = cv2.cvtColor(np.array(im), cv2.COLOR_RGB2BGR)  # PIL RGB -> OpenCV BGR
    video.write(imm)
    # BUG FIX: the original called cv2.imshow('imm', 0) — passing the integer 0
    # instead of the frame, so no live preview was ever displayed.
    cv2.imshow('imm', imm)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        print("QUIT\n")
        break
video.release()
cv2.destroyAllWindows()
コード例 #49
0
ファイル: trigger.py プロジェクト: Jolken/csgoTrigger
import numpy as np
from PIL import ImageGrab
from cv2 import cvtColor, COLOR_BGR2GRAY, Canny
import time
import keyboard
import winsound
from directkeys import PressKey, ReleaseKey

screenShot = ImageGrab.grab()  # throwaway grab, used only to read the screen size
HALF_WIDTH = screenShot.size[0] / 2   # screen centre x (float under Python 3)
HALF_HEIGHT = screenShot.size[1] / 2  # screen centre y (float under Python 3)
HOTKEY = 'shift+z'  # toggles the trigger on/off
FIRE_KEY = 0x18  # key codes --> https://msdn.microsoft.com/en-us/library/windows/desktop/bb321074(v=vs.85).aspx
SHOTS_NUMBER = 5  # different on each weapon
del screenShot  # only the dimensions were needed; free the image
isEnabled = False  # trigger starts disabled


def inverseIsEnabled():
    """Toggle the global enabled flag, beeping at a distinct pitch per state."""
    global isEnabled
    isEnabled = not isEnabled
    if isEnabled:
        winsound.Beep(500, 200)
        print('Enabled')
    else:
        winsound.Beep(1000, 200)
        print('Disabled')


#Detect edges in image
コード例 #50
0
#!/usr/bin/python

import sched, time, sys, socket, os
from PIL import ImageGrab, Image, ImageStat
from pprint import pprint

ourScreenWidth = ImageGrab.grab().size[0]  # current screen width in pixels
ourOffset = 0  # vertical letterbox offset; presumably updated by getOffset() — confirm
ourStrip = 8  # number of LED strip segments
TCP_IP = "raspbmc.local"  # ambilight controller host
TCP_PORT = 10000
BUFFER_SIZE = 1024


def millis():
    """Return the current wall-clock time in whole milliseconds."""
    now_ms = time.time() * 1000
    return int(round(now_ms))


def getChunk(chunk, ourImage):
    """Average the colour of one horizontal strip segment of *ourImage*.

    chunk    -- segment index, 0..ourStrip-1, left to right
    ourImage -- PIL image spanning the full screen width, 100 px tall

    NOTE(review): as captured here the function builds ``color`` but has no
    ``return color`` line — the snippet appears truncated at the end; only the
    out-of-range guard visibly returns a value. Confirm against the original.
    """
    global ourOffset
    # Segments beyond the configured strip length yield fully transparent black.
    if chunk > (ourStrip - 1):
        return (0, 0, 0, 0)

    # Width of one LED segment in pixels.
    ourStripWidth = ourScreenWidth / (ourStrip)

    ourCrop = ourImage.crop(
        (ourStripWidth * chunk, 0, ourStripWidth * (chunk + 1), 100))
    ourMath = ImageStat.Stat(ourCrop)

    # rms[3] implies an alpha channel (RGBA image) — TODO confirm grab mode
    color = (round(ourMath.rms[0]), round(ourMath.rms[1]),
             round(ourMath.rms[2]), round(ourMath.rms[3]))
コード例 #51
0
def capture_frontview():
    """Capture the 800x600 front-view region below the 40-px title bar."""
    # Preprocess needed
    shot = ImageGrab.grab(bbox=(0, 40, 800, 640))
    return np.array(shot)
コード例 #52
0
# Calibrate the on-screen position of the game's Upgrade button via template search.
upgrade_image = Image.open('element/Upgrade.png')  # template image of the button

game_window = get_game_window()  # presumably (left, top, right, bottom) — confirm

# Centre point of the game window in screen coordinates.
game_center = (int((game_window[2] - game_window[0]) / 2) + game_window[0],
               int((game_window[3] - game_window[1]) / 2) + game_window[1])

# Give the game focus.
safe_click_pos = (max(0, game_window[0] - 1), max(0, game_window[1]))
Mouse.click(*safe_click_pos)
time.sleep(0.200)

# Expected button location = window centre plus a configured offset.
expected_x = game_center[0] + upgrade_offset[0]
expected_y = game_center[1] + upgrade_offset[1]
# Calibrate upgrade_image offset.
upgrade_pos = image_search(ImageGrab.grab(), upgrade_image, expected_x, expected_y, radius=10)
if upgrade_pos[0] == -1:
    # image_search signals "not found" with an x of -1.
    logging.error("Failed to find upgrade button, expected it to be near {0}, {1}".format(expected_x, expected_y))
    sys.exit(1)

logging.log(VERBOSE, "Upgrade button found at: {0}, offset: {1},{2}".format(
    upgrade_pos, expected_x - upgrade_pos[0], expected_y - upgrade_pos[1]))

# Adjust pos to be a clicking position (centre of the matched template).
upgrade_pos = (upgrade_pos[0] + int(upgrade_image.size[0] / 2), upgrade_pos[1] + int(upgrade_image.size[1] / 2))
# Save button sits a fixed 139 px to the right of the Upgrade button.
save_pos = (upgrade_pos[0] + 139, upgrade_pos[1])

digit_positions = [
    ((game_center[0] + 387, game_center[1] + 55), args.ignore1),
    ((game_center[0] + 387, game_center[1] + 80), args.ignore2),
コード例 #53
0
def capture_mapview():
    """Capture the minimap region (``mapview_bbox``) as an ndarray."""
    shot = ImageGrab.grab(bbox=mapview_bbox)
    return np.array(shot)
コード例 #54
0
                      (0, 0, 255), 3)
    else:
        return


# Preview window with a mouse callback (mouse_callback defined earlier in the file).
cv2.namedWindow("img_color")
cv2.setMouseCallback("img_color", mouse_callback)

# Result window with a trackbar for tuning the HSV threshold interactively.
cv2.namedWindow("img_result")
cv2.createTrackbar("threshold", "img_result", 0, 255, nothing)
cv2.setTrackbarPos("threshold", "img_result", 30)

while True:
    # img_color = cv.imread('C:/Users/COM-11/Documents/hsv.jpg')
    img_original = ImageGrab.grab(bbox=(window_x, window_y, window_w,
                                        widow_h))  # x, y, w, h
    img_np = np.array(img_original)
    img_color = cv2.cvtColor(img_np, cv2.COLOR_BGR2RGB)

    org_height, org_width = 480, 680
    # print(height, width)
    """
    img_color = cv2.resize(
        img_color, (org_width, org_height), interpolation=cv2.INTER_AREA
    )
    """

    # 원본 영상을 HSV 영상으로 변환합니다.
    img_hsv = cv2.cvtColor(img_color, cv2.COLOR_BGR2HSV)

    # 범위 값으로 HSV 이미지에서 마스크를 생성합니다.
コード例 #55
0
 def capture_current_image(self):
     """Store a fresh full-screen grab on ``self.current_image`` and log its type."""
     grabbed = ImageGrab.grab()
     self.current_image = grabbed
     print(type(self.current_image))
コード例 #56
0
ファイル: bbb.py プロジェクト: rabbitsfish/untitled
 def __init__(self, fps):
     """Prepare an XVID screen recorder writing video02.avi at *fps* frames/s."""
     self.fps = fps
     self.curScreen = ImageGrab.grab()  # initial grab, used only for its dimensions
     # BUG FIX: PIL's Image.size is (width, height); the original unpacked it
     # into (heigth, width) [sic] and then referenced the never-assigned
     # self.height below, which raised AttributeError at construction time.
     self.width, self.height = self.curScreen.size
     self.video = cv2.VideoWriter('video02.avi', cv2.VideoWriter_fourcc(*'XVID'), self.fps, (self.width, self.height))
コード例 #57
0
ファイル: test.py プロジェクト: toothlessG22/Summer2017
from lib import OCR
import math
import cv2
import time

c = 0  # unused in the visible snippet

# NOTE(review): time.clock() was removed in Python 3.8; these timings only run
# on older interpreters — time.perf_counter() is the modern equivalent.
t = time.clock()
BTcoords = openCVLocate.locateCenter('img/blueTeamTT.png')  # locate blue-team template on screen
print("BT:" + str(time.clock() - t))
botimg = cv2.imread('img/Bot.png' ,0)  # greyscale bot template (unused in the visible loop)

# Per-frame timing of each conversion stage, then a right-to-left scan of a
# fixed-offset pixel bar (presumably a health/progress bar — confirm).
while True:
    ctime = time.clock()
    t = time.clock()
    im = ImageGrab.grab()
    print("IG:" + str(time.clock() - t))
    t = time.clock()
    im = im.convert('RGB')
    print("CON:" + str(time.clock() - t))
    t = time.clock()
    im = np.array(im)
    print("NP:" + str(time.clock() - t))
    t = time.clock()
    # Sample a 2-px-tall, 293-px-wide strip at a fixed offset from the template hit.
    y = math.floor(BTcoords[1]) + 67
    x = math.floor(BTcoords[0]) - 672
    c_img_array = im[y:y+2, x:x+293]
    # Scan right-to-left for the first bright pixel; print its fractional position.
    for i in range(292, 0, -1):
        if(int(c_img_array[0][i][0]) * int(c_img_array[0][i][1]) * int(c_img_array[0][i][2]) > 1000000):
            print(i/293)
            break
コード例 #58
0
import numpy as np
import cv2
from PIL import ImageGrab
import time

# Continuously grab a screen region, show it, and report per-iteration latency.
last_time = time.time()
while True:
    frame = np.array(ImageGrab.grab(bbox=(0, 40, 600, 500)))
    print('loop took {} seconds'.format(time.time() - last_time))
    last_time = time.time()
    cv2.imshow('window', cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
コード例 #59
0
ファイル: pyDino.py プロジェクト: JLENF/pydino
i = 1  # snapshot counter — presumably for the commented-out save code; confirm
diff2 = 0  # unused in the visible snippet
start = time.time()  # reference time for the on-screen chronometer


def pula():
    """Make the dino jump: release 'down', tap 'up' briefly, then re-crouch."""
    keyboard.release(Key.down)
    keyboard.press(Key.up)
    time.sleep(0.165)  # hold 'up' for 165 ms to simulate a real key press
    print('PULA')
    keyboard.release(Key.up)
    keyboard.press(Key.down)


while True:
    image = ImageGrab.grab(bbox=(40, 371, 800, 530))
    #image.putpixel((x,y),(255,0,0));

    #image.save('C:\\Python\\1-SOURCES\\cool\\game-dino\\snapshots\\image' + str(i) +'.png');
    #i = i + 1;
    keyboard.press(Key.down)

    # verifica se barra de espaco foi pressionada para zerar a contagem
    if msvcrt.kbhit():
        if msvcrt.getch() == b' ':
            start = time.time()

    # cronometro
    now = time.time()
    diff = now - start
コード例 #60
0
def pictureshoot():
    """Capture the screen and save it under ../report/ with a timestamp name."""
    shot = ImageGrab.grab()
    stamp = time.strftime("%Y-%m-%d %H_%M_%S")
    target = os.path.abspath('..') + "\\report\\" + stamp + ".jpeg"
    shot.save(target)