Exemplo n.º 1
0
def getSectors(tophalf=False):
	"""Split the two-monitor desktop into `numLeds` vertical strips.

	Returns a list of PIL images (one per LED), grabbed left-to-right:
	first across the secondary screen (screens[0], assumed to sit to the
	LEFT of the main screen with negative x coords), then the main
	screen (screens[1]).  Relies on module-level `screens` and `numLeds`.

	:param tophalf: when True, grab only the top half of each strip.
	"""
	sector = [] # list of PIL images of sectors of the screen (len should be equal to numLeds)
	(mainx1, mainy1, mainx2, mainy2) = screens[1]
	(secx1, secy1, secx2, secy2) = screens[0]
	
	#need to get the total screen_x length
	sector_length = ( abs(mainx2) + abs(secx1) ) / (numLeds) #Note: 3520/31=110 and this is not completely correct -- particulary for the last picture
	#may want to experiment with better sector_length values
	#sector_length = 120
	div = 1
	if tophalf:
		div = 2 # used in last parameter of getarea when appended to sector below
	
	# NOTE(review): under Python 3 these divisions are float division and
	# feed pixel coordinates below -- confirm the intended interpreter.
	halfnumLeds = numLeds / 2
	
	for x in range(numLeds):
		# Note that case 14 produces a bad image -- and that the last part of the desktop(rightmost) is lost -- due to inprecision of sector_length
		# We could handle this case specially... however I think there's a better solution I've yet to find. The effect of this error doesn't bother me.
		if x * sector_length < abs(secx1): #left screen, screen[1]; a second monitor on the left will have a negative value
			sector.append(getRectAsImage( (secx1 + (x * sector_length), 
						secy1, 
						secx1 + (x * sector_length) + sector_length,
						secy2 / div)) )
		else: #right (main) screen, screen[0]
			#note that we can't use same maths as above, seeing as the right screen will start at screen coord 0,0 but x is at numLeds / 2
			sector.append(getRectAsImage( (0 + ((x - halfnumLeds) * sector_length),
						0, 
						0 + ((x - halfnumLeds) * sector_length) + sector_length,  
						mainy2 / div)) )
		
	return sector
def get_captcha_v1_hostage():
    """Detect a captcha challenge on the current page; when found, save a
    snapshot of the captcha region, idle for 5 minutes, then restart the bot.

    Relies on module-level `browser`, `check_exists_by_xpath`,
    `check_exists_by_css_selector`, `getRectAsImage`, and `restart`.
    Returns None in all cases.
    """
    print("--- Looking for Captcha ---")

    try:
        print("--- Looking for popups... ---")

        # NOTE: .click() returns None, so the `or` always attempts the
        # second selector too; NoSuchElementException from either lookup
        # means no popup was present.
        browser.find_element_by_css_selector('.qtip-button').click(
        ) or browser.find_element_by_css_selector('.submitBig').click()
        print("!!! Pop up detected !!!")
    except NoSuchElementException:
        print("+++ No popup detected +++")

    # Probe every locator that identifies a part of the captcha widget.
    captcha_flags = [
        check_exists_by_xpath(
            '//*[@id=\"page\"]/form/table/tbody/tr[7]/td[2]/table/tbody/tr/td[1]/a'
        ),
        check_exists_by_xpath(
            '//*[@id=\"page\"]/form/table/tbody/tr[7]/td[2]/table/tbody/tr/td[2]/img'
        ),
        check_exists_by_xpath(
            '//*[@id=\"page\"]/form/table/tbody/tr[7]/td[2]/table/tbody/tr/td[3]'),
        check_exists_by_xpath(
            '//*[@id=\"page\"]/form/table/tbody/tr[7]/td[2]/table/tbody/tr/td[3]/input'
        ),
        check_exists_by_xpath(
            '//*[@id=\"page\"]/form/table/tbody/tr[7]/td[2]/table/tbody/tr/td[3]/strong/a'
        ),
        check_exists_by_css_selector(
            '#page > form:nth-child(1) > table:nth-child(2) > tbody:nth-child(1) > tr:nth-child(3) > td:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1) > a:nth-child(1)'
        ),
        check_exists_by_css_selector(
            '#page > form:nth-child(1) > table:nth-child(2) > tbody:nth-child(1) > tr:nth-child(3) > td:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(2) > img:nth-child(1)'
        ),
        #check_exists_by_css_selector('.input2'),
        check_exists_by_css_selector(
            '#page > form:nth-child(2) > table:nth-child(2) > tbody:nth-child(1) > tr:nth-child(7) > td:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1) > a:nth-child(1)'
        ),
    ]

    # BUG FIX: the original chained `a or b or ... or h == True`, where the
    # `== True` bound only to the LAST probe; test truthiness of all probes.
    if any(captcha_flags):
        print("!!! Captcha found !!!")
        # Hard-coded screen region of the captcha image.
        # NOTE(review): assumes a specific monitor layout -- confirm.
        box = 2062, 390, 2167, 436

        #full_screen_capture = getScreenAsImage()
        Screen_capture = getRectAsImage((box))
        Screen_capture.save(os.getcwd() + '\\full_snap__' + 'hostage.png',
                            format='png')

        time_need_to_stay_idle = 300
        print("!!! The bot will restart after %s seconds !!!" %
              (time_need_to_stay_idle))
        time.sleep(time_need_to_stay_idle)
        restart()
    else:
        print("+++ Captcha not found +++")
        return
Exemplo n.º 3
0
 def OnMouseUp(self, event):
     """Restore the arrow cursor, save the dragged rectangle to
     %TEMP%/screenshot.png as PNG, then close the window."""
     self.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
     capture_box = (self.c1.x, self.c1.y, self.c2.x, self.c2.y)
     target_path = os.path.join(os.environ['TEMP'], 'screenshot.png')
     snapshot = getRectAsImage(capture_box)
     snapshot.save(target_path, format='png')
     self.Close()
Exemplo n.º 4
0
def image(image_type, monitor):
    """Capture either a monitor screenshot or a webcam frame.

    :param image_type: truthy -> grab display `monitor` via DesktopMagic;
        falsy -> grab a frame from camera index `monitor` via OpenCV.
    :param monitor: display index or camera index, depending on mode.
    :return: dict with 'message', 'screenshot' (PIL image), 'text_mode'.
    :raises Exception: when the camera cannot deliver a frame.
    """
    if image_type:
        screenshot = getRectAsImage(getDisplayRects()[monitor])

        return {
            'message': 'Screenshot successfully captured',
            'screenshot': screenshot,
            'text_mode': 'success'
        }

    # BUG FIX: the original `if x: ... elif not x: ... else: raise` made
    # the final branch unreachable (image_type is either truthy or falsy);
    # collapsed to a plain fall-through.
    cam = cv2.VideoCapture(monitor)

    check, frame = cam.read()
    if not check:
        raise Exception('Cam unavailable')

    # OpenCV delivers BGR; convert so the PIL image is true RGB.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    frame = Image.fromarray(frame)

    cam.release()

    return {
        'message': 'Cam screenshot successfully captured',
        'screenshot': frame,
        'text_mode': 'success'
    }
def dominant_screen_color(initial_color, func_bounds=lambda: None):
    """
    https://stackoverflow.com/questions/50899692/most-dominant-color-in-rgb-image-opencv-numpy-python

    Capture the monitor described by func_bounds and return its most
    frequent pixel color converted to HSBK, keeping the temperature
    component of `initial_color`.
    """
    monitor = get_monitor_bounds(func_bounds)
    # "full" selects the entire virtual desktop; otherwise `monitor` is a
    # rect string parsed by str2list.
    if "full" in monitor:
        screenshot = getScreenAsImage()
    else:
        screenshot = getRectAsImage(str2list(monitor, int))

    # Downscale 4x to keep the histogram pass cheap.
    downscale_width, downscale_height = screenshot.width // 4, screenshot.height // 4
    screenshot = screenshot.resize((downscale_width, downscale_height),
                                   Image.HAMMING)

    a = np.array(screenshot)
    a2D = a.reshape(-1, a.shape[-1])  # one row per pixel
    col_range = (256, 256, 256)  # generically : a2D.max(0)+1
    # Pack each (R, G, B) pixel into a single integer via numexpr so that
    # np.bincount can find the modal color in one pass.
    eval_params = {
        'a0': a2D[:, 0],
        'a1': a2D[:, 1],
        'a2': a2D[:, 2],
        's0': col_range[0],
        's1': col_range[1]
    }
    a1D = ne.evaluate('a0*s0*s1+a1*s0+a2', eval_params)
    # Unpack the most common packed value back into an (R, G, B) tuple.
    color = np.unravel_index(np.bincount(a1D).argmax(), col_range)

    color_hsbk = list(utils.RGBtoHSBK(color, temperature=initial_color[3]))
    # color_hsbk[2] = initial_color[2]  # TODO Decide this
    return color_hsbk
Exemplo n.º 6
0
 def test_invalidRect(self):
     """saveRectToBmp / getRectAsImage must reject malformed rects:
     zero or negative size, wrong element types, wrong arity, and None."""
     fname = tempfile.mktemp()
     self.addCleanup(self._tryUnlink, fname)
     bad_rects = [
         (100, 100, 100, 100),
         (100, 100, 99, 100),
         (100, 100, 100, 99),
         (100, 100, 100, None),
         (100, 100, "100", None),
         (100.0, 100, 101, 101),
         (100, 100, 101, 101.0),
         (100, 100, 200, 200, 200),
     ]
     for bad in bad_rects:
         self.assertRaises(
             ValueError, lambda bad=bad: saveRectToBmp(fname, rect=bad))
     self.assertRaises(TypeError, lambda: saveRectToBmp(fname, rect=None))
     self.assertRaises(TypeError, lambda: getRectAsImage(rect=None))
Exemplo n.º 7
0
def fullscreenGrab():
    """Grab a fixed desktop region, crop out the 15-puzzle board, and save
    it to a timestamped PNG; stores the path in global `puzzle_filename`."""
    global puzzle_filename
    board_shot = getRectAsImage((3840, 590, 5760, 1670))
    stamp = time.strftime("%Y%m%d-%H%M%S")
    puzzle_filename = 'C:\\Users\\Michael\\Documents\\Projects\\usr\\bin\\python\\8-Puzzle\\15-puzzle-clicker\\puzzle_' + stamp + '.png'
    # rect256.save(puzzle_filename, format='png')
    # img = Image.open(puzzle_filename)
    board_shot.crop((1650, 175, 1778, 303)).save(puzzle_filename)
Exemplo n.º 8
0
 def Start_buttonOnActivated(self):
     """Capture the configured screen when the meeting has not yet ended.

     BUG FIX: the original compared the tuples
     (start_hour, end_hour) < (end_hour, end_hour), which by tuple
     ordering reduces exactly to `start_hour < end_hour`; the redundant
     (and typo-prone) tuple form is replaced by the direct comparison.
     """
     if self.meeting_start_hour < self.meeting_end_hour:
         screens = (getDisplayRects())
         rect = getRectAsImage(screens[self.capture_screen])
         # Shown for visual feedback; should later be sent to the server
         # instead (translated from the original Korean comment).
         # rect.save()
         rect.show()
         print("returned from function capture")
Exemplo n.º 9
0
def parse_command(data):
    """Dispatch one remote-control command.

    :param data: list of string tokens; data[0] is the command name and
        the remaining items are its arguments (e.g. ["mouseClick",
        "amount=2", "btn=right"]).
    :return: a ';'-terminated textual acknowledgement; for "ping true",
        the base64-encoded PNG of the 400x600 region around the cursor.
    """
    if data[0] == "ping":
        if data[1] == "true":
            # Grab a 400x600 region centered on the current cursor position.
            _, _, (x, y) = win32gui.GetCursorInfo()
            image = getRectAsImage((x - 200, y - 300, x + 200, y + 300))

            val = ""

            try:
                # Round-trip through a file to obtain PNG bytes for b64.
                image.save('im.png', format='png')

                with open("im.png", "rb") as imageFile:
                    val += str(base64.b64encode(imageFile.read()))
            except OSError:
                print("failed to create image")

            return val + ";"
        else:
            return "pong;"
    elif data[0] == "mouseClick":
        # Defaults, overridden by "amount=" / "btn=" parameters if present.
        amount = 1
        btn = 'left'
        for param in data:
            if param[:7] == "amount=":
                amount = param[7:]
            elif param[:4] == "btn=":
                btn = param[4:]
        WindowsControl.click(amount=amount, btn=btn)
        return "mouse click;"
    elif data[0] == "mouseMove":
        # data[1], data[2] are relative x/y offsets (as strings).
        WindowsControl.move_rel(data[1], data[2])
        return "mouse moved " + data[1] + " " + data[2] + ";"
    elif data[0] == "mouseDrag":
        btn = 'left'
        if data[1]:
            btn = data[1]
        WindowsControl.drag_start(btn=btn)
        return "mouse drag start;"
    elif data[0] == "mouseDragEnd":
        btn = 'left'
        if data[1]:
            btn = data[1]
        WindowsControl.drag_end(btn=btn)
        return "mouse drag end;"
    elif data[0] == "vscroll":
        WindowsControl.vscroll_wheel(data[1])
        return "vertical scroll;"
    elif data[0] == "hscroll":
        WindowsControl.hscroll_wheel(data[1])
        return "horizontal scroll;"
    elif data[0] == "zoom":
        WindowsControl.zoom(data[1])
        return "zoom;"
    elif data[0] == "k":
        WindowsControl.keyboard_entry(data[1])
        return "keyboard: " + data[1] + ";"
    else:
        # Unknown command: echo it back so the client sees the failure.
        return data[0] + " is not a recognized command;"
Exemplo n.º 10
0
 def capture(self) -> Optional[Any]:
     """Return a BGR numpy frame of the tracked window when it currently
     is the foreground window; otherwise return None."""
     hwnd = GetForegroundWindow()
     if not hwnd:
         return None
     if GetWindowText(hwnd) != self.__window_name:
         return None
     rect = self.__get_handle_rect(hwnd)
     self.__maybe_align(rect)
     screen_shot = getRectAsImage(rect.tuple)
     # PIL delivers RGB; convert to OpenCV's BGR ordering.
     return cv2.cvtColor(np.array(screen_shot), cv2.COLOR_RGB2BGR)
Exemplo n.º 11
0
def getMembers():
    """OCR the configured screen region and return the members-online
    count; falls back to the previous `membersOnline` value when the OCR
    output contains no digits."""
    global membersOnline
    count = membersOnline
    snapshot = getRectAsImage(cfg.membersCountPos)
    text = pytesseract.image_to_string(snapshot)
    if text and hasNumbers(text):
        count = int(''.join(ch for ch in text if ch.isdigit()))
    return count
Exemplo n.º 12
0
    def pixelsearch(self, pixel, offset=(0, 0, 0, 0), click=''):
        """
        Searches first pixel and if click = '', return its absolute (x, y) position.

        :param pixel: a tuple (R, G, B) of int values corresponding to the pixel values.
        :param offset: a tuple (left, top, right, bottom) for how much to offset the search area.
                offset crops the panel search area so you can control better where you want to find the pixel,
                in case it finds pixels before that are not intended.
        :param click: whether to 'click', 'double' click, 'move', or if default '', return the position.
        :return: (x, y) when click == ''; otherwise None (after moving /
                clicking).  Prints a message when the pixel is not found.
        """
        p = self
        if DRAW_OUTLINES:
            p.draw_outline()

        #Lets set our haystack bounding box to be searched
        hx1, hy1 = p.rectangle().left + offset[0], p.rectangle(
        ).top + offset[1]
        hx2, hy2 = p.rectangle().right - offset[2], p.rectangle(
        ).bottom - offset[3]

        #save that haystack as a PIL RGB image and then as a numpy array
        im = getRectAsImage((hx1, hy1, hx2, hy2))
        image = np.array(im)

        #image is RGB, but cv2 uses BGR for searching, so we need to invert it
        image = image[:, :, ::-1].copy()

        #we're not doing an upper or lower boundary because we want exactly the pixel that we specified
        lower = np.array([pixel[2], pixel[1], pixel[0]])  # BGR-code
        upper = np.array([pixel[2], pixel[1], pixel[0]])  # BGR-code

        # inRange marks exact matches; findNonZero lists their coordinates
        # (or returns None when nothing matched).
        mask = cv2.inRange(image, lower, upper)
        coord = cv2.findNonZero(mask)

        try:
            #foundcoord = first pixel found, we could implement a way to get the next pixels too.
            #             we're not limited to the first one.
            # NOTE: when coord is None, indexing raises TypeError, which the
            # except below deliberately treats as "pixel not found".
            foundcoord = (coord[0][0][0], coord[0][0][1])

            #get the absolute position
            absx, absy = self.abspos_from_rel(foundcoord, offset=offset)
            print(f'Pixel found at {absx}, {absy}')

            if click == '':
                return (absx, absy)
            else:
                mouse.move(absx, absy)

            if click == 'single':
                mouse.click()
            elif click == 'double':
                mouse.double_click()
            elif click == 'move':
                return

        except TypeError:
            print('Pixel not found!')
Exemplo n.º 13
0
def getwindowshot():
    """Screenshot the RS client window (global `rsWindow` handle), trimmed
    to the top-left two-thirds; returns None when no valid handle is set."""
    global rsWindow
    if rsWindow < 1:
        return None
    left, top, right, bottom = win32gui.GetWindowRect(rsWindow)
    # Cut away the right 1/3 and the bottom 1/3 of the screenshot.
    crop_right = int(left + abs(right - left) * (2 / 3))
    crop_bottom = int(top + abs(bottom - top) * (2 / 3))
    return getRectAsImage(rect=(left, top, crop_right, crop_bottom))
Exemplo n.º 14
0
def grab() -> Union[Image, None]:
    """Let the user select a rectangle via the Tkinter overlay and return
    it as a grayscale PIL image, or None when selection/capture fails.
    """
    try:
        pos1, pos2 = _sort_coords(_run_tkinter())
        image = getRectAsImage(
            (*pos1,
             *pos2)).convert('L')  # BUG, breaks with Window's content scaling
        #image.show() # For debugging
        return image  # in PIL format
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.
    except Exception:
        return None
Exemplo n.º 15
0
 def screenCapture(self):
     """Capture the configured display, show it for feedback, then block
     for `capture_cycle` seconds using a local Qt event loop."""
     loop = QEventLoop()
     QTimer.singleShot(self.capture_cycle * 1000, loop.quit)
     display_rects = getDisplayRects()
     shot = getRectAsImage(display_rects[self.capture_screen])
     # Shown for visual feedback; should later be sent to the server
     # instead (translated from the original Korean comment).
     # rect.save()
     shot.show()
     print("screen captured")
     loop.exec_()
Exemplo n.º 16
0
def util_takeScreenshot(bbox=None, show=True, filepath=""):
    """Grab the region `bbox` as a PIL image, optionally display it and/or
    save it, and return it.

    :param bbox: optional bounding box (x1,y1,x2,y2)
    :param show: display the grabbed image when True
    :param filepath: save destination; skipped when the empty string
    """
    captured = getRectAsImage(bbox)
    if show:
        img_showImage(captured)
    if filepath != "":
        img_saveImageFile(captured, filepath)
    return captured
Exemplo n.º 17
0
def avg_screen_color(initial_color, func_bounds=lambda: None):
    """ Capture an image of the monitor defined by func_bounds, then get the average color of the image in HSBK"""
    monitor = get_monitor_bounds(func_bounds)
    # "full" means the entire virtual desktop; otherwise a rect string.
    if "full" in monitor:
        grabbed = getScreenAsImage()
    else:
        grabbed = getRectAsImage(str2list(monitor, int))
    # A 1x1 HAMMING resize yields the average color of the whole image.
    avg_rgb = grabbed.resize((1, 1), Image.HAMMING).getpixel((0, 0))
    return list(utils.RGBtoHSBK(avg_rgb, temperature=initial_color[3]))
Exemplo n.º 18
0
 def screenshot(window_title=None):
     """Grab a window by exact title, or the full desktop when no title
     is given.  Returns None when the titled window is missing, hidden,
     or minimized (GetWindowPlacement showCmd == 2)."""
     if not window_title:
         return getScreenAsImage()
     hwnd = win32gui.FindWindow(None, window_title)
     if not hwnd:
         return None
     if not win32gui.IsWindowVisible(hwnd):
         return None
     if win32gui.GetWindowPlacement(hwnd)[1] == 2:
         return None
     return getRectAsImage(win32gui.GetWindowRect(hwnd))
Exemplo n.º 19
0
def main():
    """Identify the card at a fixed screen position by perceptual hash.

    Builds a library of pHashes from card images on disk, grabs one
    card-sized region of the screen, and prints/shows the closest match.
    Relies on module-level layout constants (COMMUNITY_*, LHU_XPOS,
    SU_YPOS, WIDTH, HEIGHT) plus the imagehash / PIL / Card imports.
    """

    # pre-load images on startup
    # C D H S
    cardLibrary = {}
    rollSuit = ["C", "D", "H", "S"]

    # Pre-compute the x positions of the five community-card slots.
    for i in range(0, 5):
        COMMUNITY_XPOS.append(COMMUNITY_XPOS_START + (COMMUNITY_OFFSET_X * i))

    # Hash a cropped strip of every card image (13 ranks x 4 suits).
    for n in range(0, 13):
        reverser = 5
        for s in range(0, 4):
            reverser -= 1
            nS = reverser  # suit index counts down 4..1, fed to Card()
            cNum = n + 2  # card rank: 2..14
            lS = rollSuit[s]
            archedImg = Image.open("cardDeck\\community\\{}{}.png".format(
                cNum, lS))

            # Crop to the rank/suit strip used for hashing.
            archedImg = archedImg.crop((0, 40, 35, 80))

            fatHash = imagehash.phash(archedImg)

            cardLibrary["{}{}".format(
                cNum, lS)] = [fatHash, Card(cNum, nS), archedImg]

    # x = COMMUNITY_XPOS
    # y = COMMUNITY_YPOS + OFFSET
    x = LHU_XPOS - 5
    y = SU_YPOS + COMMUNITY_OFFSET_Y

    w = x + WIDTH
    h = (y + HEIGHT)
    imt = getRectAsImage((x, y, w, h))
    # imt = getRectAsImage(
    #     (RHC_XPOS, CANT_YPOS, RHC_XPOS+WIDTH,CANT_YPOS+HEIGHT)).rotate(
    #         10,Image.BILINEAR,fillcolor="white")
    imt.show()
    testImageHash = imagehash.phash(imt)

    # Nearest-neighbour search over perceptual-hash distance.
    winningKey = 0
    lowestDiff = 9999999
    for k, v in cardLibrary.items():
        diff = abs(v[0] - testImageHash)
        print(k, diff, lowestDiff)
        if diff < lowestDiff:
            lowestDiff = diff
            winningKey = k

    print(winningKey, cardLibrary[winningKey][1])
    cardLibrary[winningKey][2].show()
Exemplo n.º 20
0
	def test_invalidRect(self):
		"""Malformed rects must raise ValueError (zero/negative size, bad
		element types, wrong arity); a None rect raises TypeError."""
		fname = tempfile.mktemp()
		self.addCleanup(self._tryUnlink, fname)
		invalid = [
			(100, 100, 100, 100),
			(100, 100, 99, 100),
			(100, 100, 100, 99),
			(100, 100, 100, None),
			(100, 100, "100", None),
			(100.0, 100, 101, 101),
			(100, 100, 101, 101.0),
			(100, 100, 200, 200, 200),
		]
		for rect in invalid:
			self.assertRaises(ValueError, lambda rect=rect: saveRectToBmp(fname, rect=rect))
		self.assertRaises(TypeError, lambda: saveRectToBmp(fname, rect=None))
		self.assertRaises(TypeError, lambda: getRectAsImage(rect=None))
 def get_image_of_selected_area(self):
     """Grab the rectangle spanned by the drag start/end points, saving a
     display copy and a 3x-upscaled copy for OCR.

     Side effect: converts self.start_point / self.end_point from QPoint
     objects into plain (x, y) tuples, as later code expects.
     """
     self.start_point = self.start_point.x(), self.start_point.y()
     self.end_point = self.end_point.x(), self.end_point.y()
     x_axis, y_axis = self.get_coordinates_for_position([self.start_point, self.end_point])
     capture_box = (min(x_axis), min(y_axis), max(x_axis), max(y_axis))
     self.img = getRectAsImage(capture_box)
     self.img.save(r'images\temporary_snip_for_display.png', format='png')
     # Alternate picture to get a better image-to-text conversion.
     w, h = self.img.size
     self.img = self.img.resize((w * 3, h * 3), resample=Image.BICUBIC)
     self.img.save(r'images\temporary_snip.png', format='png')
     return
Exemplo n.º 22
0
def main():
	"""Interactive soak test: repeatedly exercise each screengrab API and
	discard the result, so leaks show up in Task Manager."""
	print("""\
This program helps you test whether screengrab_win32 has memory leaks
and other problems.  It takes a screenshot repeatedly and discards it.

Open Task Manager and make sure Physical Memory % is not ballooning.
Memory leaks might not be blamed on the python process itself (which
will show low memory usage).

Lock the workstation for a few minutes; make sure there are no leaks
and that there are no uncaught exceptions here.

Repeat above after RDPing into the workstation and minimizing RDP;
this is like disconnecting the monitor.

Change your color depth settings.  Add and remove monitors.  RDP
in at 256 colors.
""")
	# One (grabber, tag) pair per API; each success prints its tag, each
	# GrabFailed prints the exception -- the same S/D/R trace as before.
	probes = (
		(lambda: getScreenAsImage(), "S"),
		(lambda: getDisplaysAsImages(), "D"),
		(lambda: getRectAsImage((0, 0, 1, 1)), "R"),
	)
	while True:
		for grabber, tag in probes:
			try:
				grabber()
				print(tag, end=" ")
				sys.stdout.flush()
			except GrabFailed as e:
				print(e)
Exemplo n.º 23
0
def screen_avg(_screen):
    """Compute average-color data for a screen (or its zones).

    :param _screen: screen config object exposing .bbox, .sat,
        .zone_state and .zones (list of dicts with x1/y1/x2/y2/bulbs).
    :return: img_avg() result for the whole image, or -- in zone mode --
        a dict with a 'zones' list of per-zone img_avg() results, each
        tagged with the bulbs it drives.
    """
    screen_data = {}

    # Win version uses DesktopMagic for multiple displays
    if params.BUILD == 'win':
        try:
            img = getRectAsImage(_screen.bbox)
        except IndexError:
            # Display layout changed underneath us; re-validate, retry once.
            utility.display_check(_screen)
            img = getRectAsImage(_screen.bbox)
    # Mac version uses standard PIL ImageGrab
    else:
        img = ImageGrab.grab()

    # Resize for performance - this could be a user editable setting
    size = (16, 9)
    img = img.resize(size)

    # Enhance saturation according to user settings
    sat_scale_factor = float(_screen.sat)
    if sat_scale_factor > 1.0:
        sat_converter = ImageEnhance.Color(img)
        img = sat_converter.enhance(sat_scale_factor)

    zone_result = []
    if _screen.zone_state:
        # Zone coordinates are relative to the 16x9 downscaled image.
        for zone in _screen.zones:
            box = (int(zone['x1']), int(zone['y1']), int(zone['x2']),
                   int(zone['y2']))
            zone_img = img.copy().crop(box)
            zone_data = img_avg(zone_img)
            zone_data['bulbs'] = zone['bulbs']
            zone_result.append(zone_data)

        screen_data['zones'] = zone_result
    else:
        screen_data = img_avg(img)

    return screen_data
Exemplo n.º 24
0
    def getFrameWin(self):
        """Capture the tracked window, crop it per self.crop, convert to
        grayscale, and resize to self.outDim; returns a numpy frame."""
        # Corner coordinates of the capture window.
        window_rect = win32gui.GetWindowRect(self.hwnd)
        raw = np.array(getRectAsImage(window_rect))
        # crop is (x1, y1, x2, y2); numpy indexes rows (y) first.
        cropped = raw[self.crop[1]:self.crop[3], self.crop[0]:self.crop[2]]
        gray = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
        return cv2.resize(gray, self.outDim)
Exemplo n.º 25
0
def testConvolution(area, filename):
    """Grab `area`, boost its contrast, then pixelate it by shrinking the
    longest side to 256 and scaling back up with NEAREST.  When
    `filename` is truthy the pixelated result is saved with a timestamp
    suffix; the (contrast-boosted) grab itself is returned."""
    grabbed = contrast(getRectAsImage(area), 200)

    ratio = max(grabbed.size) / 256
    small_size = tuple(int(d // ratio) for d in grabbed.size)
    print(small_size)

    pixelated = grabbed.resize(small_size, resample=Image.BILINEAR).resize(
        grabbed.size, Image.NEAREST)

    if filename:
        pixelated.save(filename + timestamp() + '.png', format='png')
    return grabbed
Exemplo n.º 26
0
def getPixmapFromScreen(posx, posy, W, H):
    """Grab the rectangle (posx, posy, posx+W, posy+H) from the screen and
    return it as a detached QPixmap copy; returns None when either
    dimension is zero."""
    if W == 0 or H == 0:
        return None
    grabbed = getRectAsImage((posx, posy, posx + W, posy + H))
    return QtGui.QPixmap.fromImage(ImageQt(grabbed)).copy()
Exemplo n.º 27
0
 def takeScreenShot(self):
     """Grab the user-selected display, store it for processing, and show
     it full-screen on the overlay canvas.
     """
     screen_rectangles = getDisplayRects()
     numberOfScreens = len(screen_rectangles)
     # BUG FIX: the original clamped with min(idx, numberOfScreens), which
     # still allowed the out-of-range index == len(screen_rectangles);
     # clamp to the last valid index and floor at 0.
     screenIndex = min(int(self.state["display_index"].get()) - 1,
                       numberOfScreens - 1)
     screenIndex = max(screenIndex, 0)
     image = getRectAsImage(screen_rectangles[screenIndex]).convert("RGBA")
     # (The original assigned this twice; once is enough.)
     self.state["imageProcessing"] = image
     self.mainCanvasFrame.text.delete("1.0", "end")
     self.attributes("-fullscreen", True)
     self.tkimage = ImageTk.PhotoImage(image)
     self.printScreenFrame.tkraise()
     # NOTE(review): rect entries are (left, top, right, bottom) tuples --
     # indices 0/1 look like coordinates rather than width/height; confirm.
     self.printScreenFrame.overlayScreenLabel_canvas.config(width=screen_rectangles[screenIndex][0],
                                                            height=screen_rectangles[screenIndex][1])
     self.printScreenFrame.overlayScreenLabel_canvas.create_image(0, 0, anchor="nw", image=self.tkimage)
Exemplo n.º 28
0
def getSectors(tophalf=False): #this function will spit out data assuming that the second monitor (if there is one) is to the left of the main monitor, rever() the list if this is not the case
	"""Split the desktop (one or two monitors) into `numLeds` strips.

	Returns a list of PIL images, one per LED.  With two screens, the
	secondary (screens[1]) is assumed to sit to the LEFT of the main
	screen (screens[0]); reverse the returned list otherwise.
	Relies on module-level `screens` and `numLeds`.

	:param tophalf: when True, grab only the top half of each strip.
	"""
	sector = [] # list of PIL images of sectors of the screen (len should be equal to numLeds)
	# NOTE(review): float division under Python 3; halfnumLeds feeds pixel
	# arithmetic below -- confirm the intended interpreter.
	halfnumLeds = numLeds / 2
	
	onescreen = False #temp variable...should really just add a parameter
	#check if we're dealing with one or two screens
	if (len(screens) > 1):
		(mainx1, mainy1, mainx2, mainy2) = screens[0]
		(secx1, secy1, secx2, secy2) = screens[1]
		sector_length = ( abs(mainx2) + abs(secx1) ) / (numLeds - 1)
	else:
		onescreen = True
		(mainx1, mainy1, mainx2, mainy2) = screens[0]
		sector_length = abs(mainx2) / (numLeds - 1)
	
	#sector_length = ( abs(mainx2) + abs(secx1) ) / (numLeds - 1) #Note: 3520/31=113 and this is not completely correct -- particulary for the last picture
	#may want to experiment with better sector_length values
	#sector_length = 120
	
	div = 1
	if tophalf:
		div = 2 # used in last parameter of getarea when appended to sector below
	
	
	
	for x in range(numLeds):
		if onescreen == False:
			if x * sector_length < abs(secx1): #left screen, screen[1]; a second monitor on the left will have a negative value
				sector.append(getRectAsImage( (secx1 + (x * sector_length), secy1, secx1 + (x * sector_length) + sector_length, secy2 / div)) )
			else: #right (main) screen, screen[0]
				#note that we can't use same maths as above, seeing as the right screen will start at screen coord 0,0 but x is at numLeds / 2
				sector.append(getRectAsImage( (0 + ((x - halfnumLeds) * sector_length), 0, 0 + ((x - halfnumLeds) * sector_length) + sector_length,  mainy2 / div)) )
				
		else: # only one screen
			sector.append(getRectAsImage( (0 + (x * sector_length), 0, 0 + (x * sector_length) + sector_length, mainy2 / div) ))
			
	return sector
Exemplo n.º 29
0
    def grab_screenshot(windows_name):
        """
        Grabs the current screenshot of the window titled *windows_name*.

        :param str windows_name: The windows name for the window, which can be find with Spy++
        :return: the screenshot, or None when the window was not found
        :rtype: PIL.Image
        """
        hwnd_main = win32gui.FindWindow(None, windows_name)
        if not hwnd_main:
            # BUG FIX: the original only printed, then proceeded to call
            # GetWindowRect(0) with the invalid handle; bail out instead.
            print('window not found!')
            return None

        window_rect = win32gui.GetWindowRect(hwnd_main)
        #print(window_rect)
        src_image: Image = getRectAsImage(window_rect)
        return src_image
Exemplo n.º 30
0
    def is_game_started(self):
        """Return True when the word "Armistice" (shown at match start,
        before the drop) is read from its known screen region."""
        frame = getRectAsImage(self._display_rect)
        frame_w, frame_h = frame.size

        # Region of the "Armistice" banner, as ratios of the frame size.
        armistice_crop_ratio = CropRatio(0.068, 0.720, 0.198, 0.761)
        prepared = self._preprocess_image(frame, frame_w, frame_h,
                                          armistice_crop_ratio, "start")
        read = pytesseract.image_to_string(prepared,
                                           lang="eng",
                                           config="--psm 8 --oem 3")
        return read.lower().strip() == "armistice"
Exemplo n.º 31
0
def main(addr):
    """Continuously stream screenshots of the second display to a pool of
    per-light worker processes driving a Bridge at `addr`.

    Relies on module-level `lights`, `regions`, `makeprocess`, and
    `senderprocess`.
    """
    bridge = Bridge(addr)
    senderQ = Queue()
    senderP = Process(target=senderprocess, args=[bridge, senderQ])
    senderP.start()
    # One worker process plus input queue per light region.
    processpool, queuepool = list(
        zip(*map(lambda x: makeprocess(*x), zip(lights, [senderQ] * regions))))
    display = getDisplayRects()[1]  # the second monitor

    # start = time.time()
    # for _ in range(15):
    while True:
        start = time.time()
        imDisplay = getRectAsImage(display)
        # Fan the captured frame out to every worker queue.
        list(map(lambda x: x.put(imDisplay), queuepool))
        # Throttle the capture loop to roughly 5 frames per second.
        sleeptime = 0.2 - (time.time() - start)
        if sleeptime > 0:
            time.sleep(sleeptime)
Exemplo n.º 32
0
def getgamestate():
    """Read the Tetris board from a 1920x1080 screen grab.

    Samples a cross of pixels around each of the 20x10 cell centers
    (painting the sampled pixels magenta for debugging), classifies each
    cell via get_block(), and -- when an active tetromino is visible --
    asks get_command() for the best move.

    :return: the best move, or [None, None] when no active piece is seen.
    """
    px = getRectAsImage((0, 0, 1920, 1080))
    board_state = []
    current_active = None
    for y in range(0, 20):
        board_line = []
        for x in range(0, 10):
            ypos = startingY + int(deltaY * y)
            xpos = startingX + int(deltaX * x)
            #if(y == 2 and x > 2 and x < 7): #in the upper "attackers" zone
            #ypos -= 2
            #elif(y == 3 and x > 2 and x < 7): #in the lower "attackers" zone
            #ypos += 3
            if (y == 19):
                ypos -= 2  # bottom row is slightly offset on screen
            # Sample vertical + horizontal strips through the cell center.
            pixelsamples = []
            for i in range(ypos - 7, ypos + 7):
                pixelsamples.append(px.getpixel((xpos, i)))
                px.load()[xpos, i] = (255, 0, 255)
            for j in range(xpos - 7, xpos + 7):
                pixelsamples.append(px.getpixel((j, ypos)))
                px.load()[j, ypos] = (255, 0, 255)
            blocktype = get_block(pixelsamples)
            #if(y < 2 and int(blocktype) < int(Block['ACTIVE_O'])):
            #blocktype = Block['EMPTY']
            if (blocktype >= int(Block['ACTIVE_O'])):
                current_active = int(blocktype)
            # Ghost pieces, the active piece, and empty cells are "free".
            if (blocktype == Block['GHOST']
                    or int(blocktype) >= int(Block['ACTIVE_O'])
                    or blocktype == Block['EMPTY']):
                board_line.append(0)
            else:
                board_line.append(1)
        board_state.append(board_line)

    numpy_board_state = array(board_state)
    # IDIOM FIX: identity comparison against None (`is not None`), not `!=`.
    if (current_active is not None):
        actual_queue = array([tetrominoQueue[current_active]])
        move = get_command(actual_queue, numpy_board_state)[0]
        print("Best move = {}".format(move))
        return move
    else:
        return [None, None]
	def test_1x1SizeRect(self):
		"""A 1x1 rect must produce a 1x1 image, both when saved directly
		to BMP and when grabbed and round-tripped through PNG."""
		# Old-style PIL import (pre-Pillow); kept local to this test.
		import Image

		fname = tempfile.mktemp() + '.bmp'
		fnamePng = tempfile.mktemp() + '.png'
		self.addCleanup(self._tryUnlink, fname)
		self.addCleanup(self._tryUnlink, fnamePng)
		saveRectToBmp(fname, rect=(100, 100, 101, 101))

		with open(fname, "rb") as f:
			im = Image.open(f)
			self.assertEqual((1, 1), im.size)

		im = getRectAsImage(rect=(100, 100, 101, 101))
		self.assertEqual((1, 1), im.size)
		im.save(fnamePng, format='png')

		with open(fnamePng, "rb") as f:
			im = Image.open(f)
			self.assertEqual((1, 1), im.size)
Exemplo n.º 34
0
    def getScreenDepthVis(self):
        """Grab the second monitor, crop out the DepthVis region, and
        return it as a grayscale numpy array.

        NOTE(review): when no second display exists, `screen` is never
        bound and a NameError follows -- confirm a second monitor is
        always present, or add an explicit guard.
        """
        # Get the screenshot from the second monitor only.
        for displayNumber, rect in enumerate(getDisplayRects(), 1):
            if displayNumber == 2:
                screen = getRectAsImage(rect)
                break  # IMPROVED: no need to scan the remaining displays

        screen = np.array(screen)

        # NOTE: its img[y: y + h, x: x + w]
        DepthVis = screen[762:1000, 98:525]

        DepthVis_gray = cv2.cvtColor(DepthVis, cv2.COLOR_BGR2GRAY)

        # IMPROVED: dropped the unused half-size resize (`small`) the
        # original computed and threw away.

        #cv2.imshow("Test", DepthVis_gray)
        #cv2.waitKey(0)

        return DepthVis_gray
Exemplo n.º 35
0
def compareImage(xoffset, yoffset, w, h, imageName):
    """Count how many channel values differ by more than 20 between the
    on-disk reference image and a live grab of the game-window region.

    :param xoffset: x offset of the region relative to gameX
    :param yoffset: y offset of the region relative to gameY
    :param w: region width in pixels
    :param h: region height in pixels
    :param imageName: path of the reference image (read with cv2, BGR)
    :return: number of differing channel samples ("badness" score)
    """
    imgFromFile = cv2.imread(imageName)
    imTemp = getRectAsImage((gameX + xoffset, gameY + yoffset,
                             gameX + xoffset + w, gameY + yoffset + h))
    # PIL grabs RGB; swap channels so it matches cv2.imread's BGR order.
    b, g, r = imTemp.split()
    imTemp = Image.merge("RGB", (r, g, b))
    imgToCheck = np.array(imTemp)

    #    if gameState == "predict":
    #        cv2.imshow(imageName,imgToCheck)
    #        cv2.resizeWindow(imageName, 400,100)

    difference = cv2.subtract(imgFromFile, imgToCheck)

    # IMPROVED: the original re-split `difference` into unused locals and
    # counted elements with a Python-level np.ndenumerate loop; a single
    # vectorized comparison counts the same elements in one pass.
    bad = int(np.count_nonzero(difference > 20))
    return bad
Exemplo n.º 36
0
 def take_screenshot(self, interval, screen_index):
     """Save a screenshot according to the selected screen mode, then
     re-schedule itself via Tk `after` every interval * MILLI_TO_MINS ms.

     :param interval: minutes between shots (scaled by MILLI_TO_MINS).
     :param screen_index: display index used by the single-screen modes.
     """
     if self.screenshot_count == 0:
         self.screenshot_count = 1
     if self.running:
         file_path = self.entry_folder_select.get()
         file_name = self.entry_project_name.get() + '_' + str(
             self.screenshot_count) + '.png'
         # Single-screen mode: grab just the chosen display.
         if self.selected_screen.get(
         ) == 'Screen 1' or self.selected_screen.get() == 'Screen 2':
             screen = getRectAsImage(getDisplayRects()[screen_index])
             screen.save(os.path.join(file_path, file_name), format='png')
             self.screenshot_count += 1
             # ! change interval multiplication back to *60000
             self.callbacks.append(
                 self.root.after(
                     interval * self.MILLI_TO_MINS,
                     lambda: self.take_screenshot(
                         int(self.entry_interval.get()), screen_index)))
         # Whole virtual desktop in a single image.
         elif self.selected_screen.get() == 'All screens':
             entire_screen = getScreenAsImage()
             entire_screen.save(os.path.join(file_path, file_name),
                                format='png')
             self.screenshot_count += 1
             self.callbacks.append(
                 self.root.after(
                     interval * self.MILLI_TO_MINS,
                     lambda: self.take_screenshot(
                         int(self.entry_interval.get()), screen_index)))
         # One file per display, suffixed with the screen number.
         elif self.selected_screen.get() == 'All screens (separate)':
             for screen_number, image in enumerate(getDisplaysAsImages(),
                                                   1):
                 file_name = self.entry_project_name.get() + '_' + str(self.screenshot_count)\
                             + '_screen' + str(screen_number) + '.png'
                 image.save(os.path.join(file_path, file_name),
                            format='png')
             self.screenshot_count += 1
             self.callbacks.append(
                 self.root.after(
                     interval * self.MILLI_TO_MINS,
                     lambda: self.take_screenshot(
                         int(self.entry_interval.get()), screen_index)))
 def _capFrame(self):
     """Return the current grab (full screen, or self.bbox when set) as a
     numpy array."""
     grab = getScreenAsImage() if self.bbox is None else getRectAsImage(self.bbox)
     return np.array(grab)
 def nt_grab(bbox=None):
     """Windows grab helper: full screen when bbox is None, otherwise the
     given (x1, y1, x2, y2) rectangle."""
     return getScreenAsImage() if bbox is None else getRectAsImage(bbox)
def saveArea(args):
	"""Grab rect `args` and save it as a PNG named after the sum over its
	grayscale color histogram (a cheap content checksum)."""
	shot = getRectAsImage(args)
	gray_colors = array(ImageOps.grayscale(shot).getcolors())
	checksum = gray_colors.sum()
	shot.save("C:/Users/AnthonyB/Desktop/python/" + str(checksum) + ".png", 'PNG')
def grabArea(args):
	"""Return the sum over the grayscale color histogram of a grab of rect
	`args` (used as a cheap change-detection checksum)."""
	gray = ImageOps.grayscale(getRectAsImage(args))
	return array(gray.getcolors()).sum()
Exemplo n.º 41
0
def getpuzzleshot(xx, yy, endx, endy):
    """Grab the rectangle (xx, yy, endx, endy), coercing every coordinate
    to int, and return it as a PIL image."""
    coords = tuple(int(v) for v in (xx, yy, endx, endy))
    return getRectAsImage(rect=coords)