Example #1
def forwarder():
    # setup

    # point pytesseract at the local Tesseract install
    pytesseract.pytesseract.tesseract_cmd = r'C:\Users\krzys\AppData\Local\Tesseract-OCR\tesseract.exe'

    # Selenium
    # options = webdriver.ChromeOptions()
    # options.add_argument("--disable-extensions")

    #============================================
    # chrome_options = webdriver.ChromeOptions()
    # # chrome_options.add_argument(r"C:\Users\krzys\AppData\Local\Google\Chrome\User Data\Default")
    # chrome_options.add_extension(r"C:\Users\krzys\AppData\Local\Google\Chrome\User Data\Default\Extensions\clidkjbfdlffpbbhlalnkifiehenkjaj\4.0.3_0.crx")
    #
    # driver = webdriver.Chrome(executable_path=r'chromedriver.exe', chrome_options=chrome_options)
    # driver.get("https://translate.google.pl/?hl=pl&tab=TT0")
    # search_input_box = driver.find_element_by_id("source")
    # ============================================
    chrome_options = webdriver.ChromeOptions()
    chrome_options.add_experimental_option("debuggerAddress", "localhost:9114")
    driver = webdriver.Chrome(executable_path=r'chromedriver.exe', options=chrome_options)
    driver.get("https://translate.google.pl/?hl=pl&tab=TT0")  # <---
    search_input_box = driver.find_element_by_id("source")  # <---

    # calibration
    entireScreen = getScreenAsImage()
    entireScreen = adjustImage(entireScreen)
    h, w, _ = entireScreen.shape
    x1, x2, y1, y2 = 0, w, 0, h
    try:
        # restore the previously calibrated capture region saved by calibration()
        with open("window.txt") as f:
            split = f.readline().split()
        x1, x2, y1, y2 = int(split[0]), int(split[1]), int(split[2]), int(split[3])
    except (OSError, IndexError, ValueError):
        # fall back to the full screen if the file is missing or malformed
        x1, x2, y1, y2 = 0, w, 0, h
    x1, y1, x2, y2 = calibration(x1, y1, x2, y2)

    dialogOld = None
    # main loop: grab the calibrated region, OCR it, and forward new text to the translator
    while True:
        time.sleep(2)
        myScreenshot = getScreenAsImage()                           # get screenshot
        myScreenshot = adjustImage(myScreenshot)
        myScreenshot = np.array(myScreenshot)[y1:y2, x1:x2]

        dialog = pytesseract.image_to_string(myScreenshot, lang="jpn")  # get text from screenshot
        print(dialog)

        if dialogOld != dialog:
            search_input_box.clear()
            search_input_box.send_keys(dialog)                              # write string to input box
        cv2.imshow("a", myScreenshot)
        k = cv2.waitKey(30)
        if k == ord("c"):
            x1, y1, x2, y2 = calibration(x1, y1, x2, y2)

        dialogOld = dialog
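
The debuggerAddress option above attaches Selenium to a Chrome instance that is already running with remote debugging enabled. A minimal sketch of starting such an instance, assuming Chrome's default install path and an illustrative profile directory (both are assumptions, not taken from the original script):

import subprocess

# Launch Chrome with remote debugging on the same port the script attaches to (9114).
# The executable path and --user-data-dir value are illustrative.
subprocess.Popen([
    r"C:\Program Files\Google\Chrome\Application\chrome.exe",
    "--remote-debugging-port=9114",
    r"--user-data-dir=C:\chrome-debug-profile",
])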
Example #2
def getWholeScreen(isgray=True):
    entireScreen = getScreenAsImage()
    image = np.array(entireScreen)
    if isgray:
        # convert the RGB screenshot to a single-channel grayscale array
        return cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    else:
        # convertImageToGray() likewise returns a grayscale image, so both branches are gray
        return convertImageToGray(image)
Example #3
def dominant_screen_color(initial_color, func_bounds=lambda: None):
    """
    https://stackoverflow.com/questions/50899692/most-dominant-color-in-rgb-image-opencv-numpy-python
    """
    monitor = get_monitor_bounds(func_bounds)
    if "full" in monitor:
        screenshot = getScreenAsImage()
    else:
        screenshot = getRectAsImage(str2list(monitor, int))

    downscale_width, downscale_height = screenshot.width // 4, screenshot.height // 4
    screenshot = screenshot.resize((downscale_width, downscale_height),
                                   Image.HAMMING)

    a = np.array(screenshot)
    a2D = a.reshape(-1, a.shape[-1])
    col_range = (256, 256, 256)  # generically : a2D.max(0)+1
    eval_params = {
        'a0': a2D[:, 0],
        'a1': a2D[:, 1],
        'a2': a2D[:, 2],
        's0': col_range[0],
        's1': col_range[1]
    }
    a1D = ne.evaluate('a0*s0*s1+a1*s0+a2', eval_params)
    color = np.unravel_index(np.bincount(a1D).argmax(), col_range)

    color_hsbk = list(utils.RGBtoHSBK(color, temperature=initial_color[3]))
    # color_hsbk[2] = initial_color[2]  # TODO Decide this
    return color_hsbk
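
The numexpr expression above packs each RGB triple into a single integer (r*256*256 + g*256 + b) so that np.bincount can count color frequencies in one pass. A minimal numpy-only sketch of the same idea, assuming a full-screen grab (the function and variable names here are illustrative):

import numpy as np
from desktopmagic.screengrab_win32 import getScreenAsImage

def dominant_screen_rgb():
    shot = getScreenAsImage()
    # Downscale so the bincount stays cheap, as in the example above.
    shot = shot.resize((shot.width // 4, shot.height // 4))
    pixels = np.asarray(shot).reshape(-1, 3).astype(np.int64)
    # Pack each (r, g, b) into one integer, count occurrences, take the most frequent.
    packed = pixels[:, 0] * 256 * 256 + pixels[:, 1] * 256 + pixels[:, 2]
    return np.unravel_index(np.bincount(packed).argmax(), (256, 256, 256))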
Example #4
 def getscreenshot(self):
     screenshot = getScreenAsImage()
     screenshot.save("screenshot.png", format="png")
     self.gmail.send(to=GMAIL,
                     subject=BOTNICK + "'ın ScreenShot'u",  # Turkish: "BOTNICK's screenshot"
                     attachments="screenshot.png")
     # Turkish: "BOTNICK's screenshot has been sent to the GMAIL address!"
     self.sendmsg(
         f"{GMAIL} Adresine {BOTNICK}'ın ScreenShot'u Gönderilmiştir!")
     os.remove("screenshot.png")
Example #5
def avg_screen_color(initial_color, func_bounds=lambda: None):
    """ Capture an image of the monitor defined by func_bounds, then get the average color of the image in HSBK"""
    monitor = get_monitor_bounds(func_bounds)
    if "full" in monitor:
        screenshot = getScreenAsImage()
    else:
        screenshot = getRectAsImage(str2list(monitor, int))
    # Resizing the image to 1x1 pixel will give us the average for the whole image (via HAMMING interpolation)
    color = screenshot.resize((1, 1), Image.HAMMING).getpixel((0, 0))
    color_hsbk = list(utils.RGBtoHSBK(color, temperature=initial_color[3]))
    return color_hsbk
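
The one-pixel HAMMING resize above is a compact way to average every pixel of the grab. A standalone sketch of the same trick for the full screen (the function name is illustrative):

from PIL import Image
from desktopmagic.screengrab_win32 import getScreenAsImage

def average_screen_rgb():
    # Resizing to 1x1 lets Pillow do the averaging for us.
    shot = getScreenAsImage()
    return shot.resize((1, 1), Image.HAMMING).getpixel((0, 0))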
Example #6
 def screenshot(window_title=None):
     if window_title:
         hwnd = win32gui.FindWindow(None, window_title)
         if hwnd:
             # skip windows that are hidden or minimized (GetWindowPlacement state 2 == SW_SHOWMINIMIZED)
             if win32gui.IsWindowVisible(hwnd) and win32gui.GetWindowPlacement(hwnd)[1] != 2:
                 rect = win32gui.GetWindowRect(hwnd)
                 screenshot = getRectAsImage(rect)
                 return screenshot
     else:
         screenshot = getScreenAsImage()
         return screenshot
Example #7
def main():
	print("""\
This program helps you test whether screengrab_win32 has memory leaks
and other problems.  It takes a screenshot repeatedly and discards it.

Open Task Manager and make sure Physical Memory % is not ballooning.
Memory leaks might not be blamed on the python process itself (which
will show low memory usage).

Lock the workstation for a few minutes; make sure there are no leaks
and that there are no uncaught exceptions here.

Repeat above after RDPing into the workstation and minimizing RDP;
this is like disconnecting the monitor.

Change your color depth settings.  Add and remove monitors.  RDP
in at 256 colors.
""")
	while True:
		try:
			getScreenAsImage()
			print("S", end=" ")
			sys.stdout.flush()
		except GrabFailed as e:
			print(e)

		try:
			getDisplaysAsImages()
			print("D", end=" ")
			sys.stdout.flush()
		except GrabFailed as e:
			print(e)

		try:
			getRectAsImage((0, 0, 1, 1))
			print("R", end=" ")
			sys.stdout.flush()
		except GrabFailed as e:
			print(e)
Example #8
def retrieve_image(window_name: str = None) -> (np.ndarray, Image):
    """
    Brings the specified window to the foreground (if given) and captures the
    whole screen, returning a grayscale numpy array plus the PIL image, both
    restricted to that window's rectangle when a match is found.
    """
    start = timer()

    toplist, winlist = [], []

    def enum_cb(hwnd, results):
        winlist.append((hwnd, win32gui.GetWindowText(hwnd)))

    win32gui.EnumWindows(enum_cb, toplist)
    bbox = None

    if window_name:
        window_name = window_name.lower()
        window = [(hwnd, title) for hwnd, title in winlist
                  if window_name in title.lower()]

        # just grab the hwnd for first window matching the window name
        try:
            window = window[0]
            hwnd = window[0]
        except IndexError:
            print("\nERROR: Specified window with name %s not found!\n" %
                  window_name)
            raise

        win32gui.ShowWindow(hwnd, win32con.SW_MAXIMIZE)
        win32gui.SetForegroundWindow(hwnd)
        bbox = win32gui.GetWindowRect(hwnd)
    raw_img: Image = getScreenAsImage()

    end = timer()

    # cut out active display only
    if bbox:
        img = np.array(raw_img.convert('RGB'))
        img = img.astype(np.uint8)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        img[:bbox[1], :] = 0
        img[bbox[3]:, :] = 0
        img[:, :bbox[0]] = 0
        img[:, bbox[2]:] = 0
        raw_img = raw_img.crop(bbox)
    else:
        img = np.array(raw_img.convert('RGB'))
        img = img.astype(np.uint8)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return img, raw_img
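
A hedged usage sketch for the function above; the window title 'notepad' is only illustrative, and a miss prints the error and re-raises IndexError as shown in the except block:

if __name__ == "__main__":
    # gray: full-screen grayscale array, zeroed outside the matched window
    # window_img: the PIL screenshot cropped to that window
    gray, window_img = retrieve_image("notepad")
    print(gray.shape, window_img.size)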
Example #9
    def GET(self): # When someone issues a GET to the server, calculate the colors and return them
        # Open the screencapture with PIL
        monitors = getScreenAsImage()
        #Image.open(file_path + "bothMonitors.png") USE THIS WHEN OPENING EXISTING PIC

        size = [NUM_SECTIONS, 1]
        monitors_small = monitors.resize(size, Image.ANTIALIAS)  # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the equivalent
        monitors_small.save(file_path + "bothMonitorsSmall.png", format='png')

        colors = [0 for i in range(0, NUM_SECTIONS, 1)]
        for x in range(0, NUM_SECTIONS, 1):
            colors[x] = monitors_small.getpixel((x, 0))

        return colors
Example #10
def save_screenshot():
    """
    Captures a screenshot of the entire screen (all monitors)
    and saves it.
    Output directory and filename are based on config options.
    """
    from datetime import datetime
    from .validpath import is_pathname_valid
    from desktopmagic.screengrab_win32 import getScreenAsImage

    img = getScreenAsImage()
    imgfp = image_fp(datetime.now())
    assert is_pathname_valid(str(imgfp)), \
           "Final image filename is not a valid path."
    img.save(imgfp)
    log.debug(f"Screenshot saved to '{imgfp}'")
    return imgfp
Example #11
def calibration(x1, y1, x2, y2):
    edge = 1     # currently selected edge: 1=x1 (left), 2=y1 (top), 3=x2 (right), 4=y2 (bottom)
    epsilon = 1  # movement step in pixels
    show = True
    # keys: 1-4 select an edge, w/a/s/d move it, +/- change the step size, q quits and saves
    #           2 y1
    #   1 x1      +     3 x2
    #           4 y2

    while True:
        entireScreen = getScreenAsImage()
        entireScreen = adjustImage(entireScreen)
        h, w, _ = entireScreen.shape
        cv2.rectangle(entireScreen, (x1, y1), (x2, y2), (255, 255, 0), 2)
        # cv2.imshow("a", entireScreen[y1:y2, x1:x2])
        cv2.imshow("a", entireScreen)
        k = cv2.waitKey(30)
        if k == ord("q"):
            break
        #                                                                               +  +  +  +
        elif k == ord("w"): y1, y2, x1, x2 = changeValue(edge, epsilon, y1, y2, x1, x2, 1, 3, 2, 4, -1, h)
        elif k == ord("a"): x1, x2, y1, y2 = changeValue(edge, epsilon, x1, x2, y1, y2, 2, 4, 1, 3, -1, w)
        elif k == ord("s"): y1, y2, x1, x2 = changeValue(edge, epsilon, y1, y2, x1, x2, 1, 3, 2, 4, 1,  h)
        elif k == ord("d"): x1, x2, y1, y2 = changeValue(edge, epsilon, x1, x2, y1, y2, 2, 4, 1, 3, 1,  w)
        elif k == ord("1"): edge = 1
        elif k == ord("2"): edge = 2
        elif k == ord("3"): edge = 3
        elif k == ord("4"): edge = 4
        elif k == ord("+"):
            if epsilon < 19:
                epsilon += 1
        elif k == ord("-"):
            if epsilon > 1:
                epsilon -= 1
        # elif k == ord("e"):
        #     show = not show
        # if show:
        print(edge, epsilon, x1, x2, y1, y2)
    cv2.destroyAllWindows()

    # persist the calibrated region so it can be restored on the next run
    with open("window.txt", "w") as file:
        file.write(str(x1) + " " + str(x2) + " " + str(y1) + " " + str(y2))

    return x1, y1, x2, y2
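
For completeness, a small sketch of reading the saved region back in the same x1 x2 y1 y2 order that calibration() writes it (the helper name is illustrative; Example #1 does the equivalent inline):

def load_window_region(path="window.txt"):
    # Returns (x1, x2, y1, y2) as written by calibration(), or None if unreadable.
    try:
        with open(path) as f:
            x1, x2, y1, y2 = (int(v) for v in f.read().split())
        return x1, x2, y1, y2
    except (OSError, ValueError):
        return None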
Example #12
 def take_screenshot(self, interval, screen_index):
     if self.screenshot_count == 0:
         self.screenshot_count = 1
     if self.running:
         file_path = self.entry_folder_select.get()
         file_name = self.entry_project_name.get() + '_' + str(
             self.screenshot_count) + '.png'
         if self.selected_screen.get() in ('Screen 1', 'Screen 2'):
             screen = getRectAsImage(getDisplayRects()[screen_index])
             screen.save(os.path.join(file_path, file_name), format='png')
             self.screenshot_count += 1
             # ! change interval multiplication back to *60000
             self.callbacks.append(
                 self.root.after(
                     interval * self.MILLI_TO_MINS,
                     lambda: self.take_screenshot(
                         int(self.entry_interval.get()), screen_index)))
         elif self.selected_screen.get() == 'All screens':
             entire_screen = getScreenAsImage()
             entire_screen.save(os.path.join(file_path, file_name),
                                format='png')
             self.screenshot_count += 1
             self.callbacks.append(
                 self.root.after(
                     interval * self.MILLI_TO_MINS,
                     lambda: self.take_screenshot(
                         int(self.entry_interval.get()), screen_index)))
         elif self.selected_screen.get() == 'All screens (separate)':
             for screen_number, image in enumerate(getDisplaysAsImages(),
                                                   1):
                 file_name = self.entry_project_name.get() + '_' + str(self.screenshot_count)\
                             + '_screen' + str(screen_number) + '.png'
                 image.save(os.path.join(file_path, file_name),
                            format='png')
             self.screenshot_count += 1
             self.callbacks.append(
                 self.root.after(
                     interval * self.MILLI_TO_MINS,
                     lambda: self.take_screenshot(
                         int(self.entry_interval.get()), screen_index)))
Example #13
def retrieve_image(window_name=None):
    """
    Brings the specified window to the foreground (if given) and returns the
    whole screen as a grayscale numpy array.
    """
    start = timer()

    toplist, winlist = [], []

    def enum_cb(hwnd, results):
        winlist.append((hwnd, win32gui.GetWindowText(hwnd)))

    win32gui.EnumWindows(enum_cb, toplist)

    if window_name:
        window_name = window_name.lower()
        window = [(hwnd, title) for hwnd, title in winlist
                  if window_name in title.lower()]

        # just grab the hwnd for first window matching the window name
        try:
            window = window[0]
            hwnd = window[0]
        except IndexError:
            print("\nERROR: Specified window with name %s not found!\n" %
                  window_name)
            raise

        win32gui.ShowWindow(hwnd, win32con.SW_MAXIMIZE)
        win32gui.SetForegroundWindow(hwnd)
        bbox = win32gui.GetWindowRect(hwnd)
    img = getScreenAsImage()

    end = timer()

    img = np.array(img.convert('RGB'))
    img = img.astype(np.uint8)
    img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return img
Example #14
def get_screen():
    screenshot = cv2.cvtColor(np.array(getScreenAsImage()), cv2.COLOR_RGB2BGR)
    screenshot = screenshot[167:(1080-45), 0:1920]  # crop away the top and bottom UI bars, assuming a 1920x1080 screen
    return screenshot
Example #15
def log_to_file(x, y):
    with open("screen_reader_log.txt", "w") as f:
        f.write("x: {} y: {}".format(x, y))


#---------------------------------------------------------------------------------
if __name__ == '__main__':
    try:
        if debug:
            img = Image.open("test.png")
        else:
            img = getScreenAsImage()
            left, top, right, bottom = xywh2ltrb(x, y, w, h)
            img = img.crop((left, top, right, bottom))
            img = img.resize((w*interpolation, h*interpolation))
            img.save("test.png")
        
        
        result, coords = search_word(img)
        if result:
            left, top, right, bottom = coords[0], coords[1], coords[2], coords[3]
            x = int((left + (right-left)/2) / interpolation) 
            y = int((top + (bottom-top)/2) / interpolation)
            print("{} {} ".format(x, y))        # This is the answer FLahk gets. The last space is important.
            log_to_file(x, y)
        else:
            log_to_file("resultat:", pytesseract.image_to_string(img))
Example #16
 def _capFrame(self):
     if self.bbox is None:
         img = getScreenAsImage()
     else:
         img = getRectAsImage(self.bbox)
     return np.array(img)
Example #17
def getScreenSize():
    # returns (height, width) of the full virtual screen
    entireScreen = getScreenAsImage()
    image = np.array(entireScreen)
    return image.shape[0], image.shape[1]
Example #18
 def nt_grab(bbox=None):
     if bbox is None:
         img = getScreenAsImage()
     else:
         img = getRectAsImage(bbox)
     return img
Example #19
from __future__ import print_function
import sys

from desktopmagic.screengrab_win32 import (
    getDisplayRects, saveScreenToBmp, saveRectToBmp, getScreenAsImage,
    getRectAsImage, getDisplaysAsImages)


entireScreen = getScreenAsImage()
entireScreen.save('screencapture.png', format='png')
Example #20
from __future__ import print_function
import sys

from desktopmagic.screengrab_win32 import (getDisplayRects, saveScreenToBmp,
                                           saveRectToBmp, getScreenAsImage,
                                           getRectAsImage, getDisplaysAsImages)

entireScreen = getScreenAsImage()
entireScreen.save('screencapture.png', format='png')
Example #21
def get_captcha_v1():
    print("--- Looking for Captcha ---")

    try:
        print("--- Looking for popups... ---")

        # .click() returns None, so this `or` simply attempts both selectors in order;
        # a missing first selector raises NoSuchElementException, which is handled below
        (browser.find_element_by_css_selector('.qtip-button').click()
         or browser.find_element_by_css_selector('.submitBig').click())
        print("!!! Pop up detected !!!")
    except NoSuchElementException:
        print("+++ No popup detected +++")

        pass

    captcha_xpath_code_text = check_exists_by_xpath(
        '//*[@id=\"page\"]/form/table/tbody/tr[7]/td[2]/table/tbody/tr/td[1]/a'
    )
    captcha_xpath_image = check_exists_by_xpath(
        '//*[@id=\"page\"]/form/table/tbody/tr[7]/td[2]/table/tbody/tr/td[2]/img'
    )
    captcha_empty_space = check_exists_by_xpath(
        '//*[@id=\"page\"]/form/table/tbody/tr[7]/td[2]/table/tbody/tr/td[3]')
    captcha_xpath_input_box = check_exists_by_xpath(
        '//*[@id=\"page\"]/form/table/tbody/tr[7]/td[2]/table/tbody/tr/td[3]/input'
    )
    captcha_xpath_image_questionmark = check_exists_by_xpath(
        '//*[@id=\"page\"]/form/table/tbody/tr[7]/td[2]/table/tbody/tr/td[3]/strong/a'
    )
    captcha_css_selector_code_text = check_exists_by_css_selector(
        '#page > form:nth-child(1) > table:nth-child(2) > tbody:nth-child(1) > tr:nth-child(3) > td:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1) > a:nth-child(1)'
    )
    captcha_css_selector_image = check_exists_by_css_selector(
        '#page > form:nth-child(1) > table:nth-child(2) > tbody:nth-child(1) > tr:nth-child(3) > td:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(2) > img:nth-child(1)'
    )
    captcha_css_selector_input = check_exists_by_css_selector('.input2')
    captcha_css_selector = check_exists_by_css_selector(
        '#page > form:nth-child(2) > table:nth-child(2) > tbody:nth-child(1) > tr:nth-child(7) > td:nth-child(2) > table:nth-child(1) > tbody:nth-child(1) > tr:nth-child(1) > td:nth-child(1) > a:nth-child(1)'
    )

    #print("\n \n !!!!!!DEBUG!!!!!! \n \n  Code text : %s \n xpath image : %s \n  xpath_empty_space : %s \n xpath input box: %s \n xpath ? : %s \n css code text : %s \n css image : %s \n css input : %s \n captcha css selector : %s \n \n" %(captcha_xpath_code_text, captcha_xpath_image, captcha_empty_space, captcha_xpath_input_box, captcha_xpath_image_questionmark, captcha_css_selector_code_text, captcha_css_selector_image, captcha_css_selector_input, captcha_css_selector))

    if (captcha_xpath_code_text or captcha_xpath_image or captcha_empty_space
            or captcha_xpath_input_box or captcha_xpath_image_questionmark
            or captcha_css_selector_code_text or captcha_css_selector_image
            or captcha_css_selector_input):

        print("!!! Captcha found !!!")
        box = 2065, 315, 2179, 358

        full_screen_capture = getScreenAsImage()
        #Screen_capture = getRectAsImage((box))
        full_screen_capture.save(os.getcwd() + '\\full_snap__' +
                                 str(int(time.time())) + '.png',
                                 format='png')

        time_need_to_stay_idle = 300
        print("!!! The bot will restart after %s seconds !!!" %
              (time_need_to_stay_idle))
        time.sleep(time_need_to_stay_idle)
        restart()

    else:
        print("+++ Captcha not found +++")
        return
Example #22
def save_screenshot_local(path):
    getScreenAsImage().save(path, format="png")
Example #23
 def nt_grab(bbox=None):
     if bbox is None:
         img = getScreenAsImage()
     else:
         img = getRectAsImage(bbox)
     return img
Example #24
import numpy as np
import argparse
import cv2
import winsound, sys
from PIL import Image  # needed for Image.merge() below

from desktopmagic.screengrab_win32 import (
    getDisplayRects, saveScreenToBmp, saveRectToBmp, getScreenAsImage,
    getRectAsImage, getDisplaysAsImages)

lower = [0, 65, 185]
upper = [20, 100, 215]

if __name__ == "__main__":

    while(True):
        im = getScreenAsImage()
        im = im.crop((87, 66, 269, 970))
        # swap the R and B channels so the numpy array below is in OpenCV's BGR order
        b, g, r = im.split()
        im = Image.merge("RGB", (r, g, b))
        image = np.array(im)

        lower = np.array(lower, dtype="uint8")
        upper = np.array(upper, dtype="uint8")

        # find the colors within the specified boundaries and apply
        # the mask
        mask = cv2.inRange(image, lower, upper)
        output = cv2.bitwise_and(image, image, mask=mask)

        if output.any():  # at least one pixel fell inside the color range
            winsound.PlaySound('SONAR.WAV', winsound.SND_FILENAME)
Example #25
def screenshot():
    getScreenAsImage().save(PATH + VOUTPUT + str(INC) + EXT)