def funcion_mouse(event,x,y,flags,param):
	global drawing_box
	global box
	if event == cv.CV_EVENT_MOUSEMOVE:
		if drawing_box == True:
			box[2] = x - box[0]
			box[3] = y - box[1]
	elif event == cv.CV_EVENT_LBUTTONDOWN:
		drawing_box = True
		box = [x,y,0,0]
	elif event == cv.CV_EVENT_LBUTTONUP:
		drawing_box = False
		if box[2] < 0:
			box[0] +=box[2]
			box[2] *=-1
		if( box[3]<0):
			box[1] += box[3]
			box[3] *= -1
		cf = [box[0],box[1],box[2],box[3]]
		#fich.write(str(box[0])+' '+str(box[1])+' '+str(box[2])+' '+str(box[3])+'\n')
		fich.write(str(box[0])+'\n')
		fich.write(str(box[1])+'\n')
		fich.write(str(box[0]+box[2])+'\n')
		fich.write(str(box[1]+box[3])+'\n')		
		im=ImageGrab.grab(bbox=(box[0],box[1],box[0]+box[2],box[1]+box[3]))
		#im=ImageGrab.grab(bbox=(0, 0, 640, 480))  # debug grab of a fixed 640x480 region; left enabled it would overwrite the cropped selection above
		im.save('recortada.png')
		print 'coordinates: '+str(box[0])+', '+str(box[1])+', '+str(box[0]+box[2])+', '+str(box[1]+box[3])
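For newer OpenCV builds that no longer ship the legacy cv module, a minimal sketch of registering an equivalent callback with the cv2 API (window name and surrounding display loop are assumptions):
import cv2

box = [0, 0, 0, 0]
drawing_box = False

def on_mouse(event, x, y, flags, param):
    global drawing_box, box
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing_box = True
        box = [x, y, 0, 0]
    elif event == cv2.EVENT_MOUSEMOVE and drawing_box:
        box[2], box[3] = x - box[0], y - box[1]
    elif event == cv2.EVENT_LBUTTONUP:
        drawing_box = False

cv2.namedWindow('seleccion')                   # assumed window name
cv2.setMouseCallback('seleccion', on_mouse)    # register the callback for that window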
 def fold(self):
     if self.folded:
         return
     self.folded = 1
     print '-----FOLDING-----'
     time.sleep(0.1)
     m = self.m#{{{
     with open('pokerstars/last_control.json') as f:
         last_control = json.load(f)
     if time.time() - last_control < 1.5:
         return
     xr = round((random.random()-0.5)*50)
     yr = round((random.random()-0.5)*30)
     xp = fold_position[0] + xr
     yp = fold_position[1] + yr
     im = pyscreenshot.grab()
     fold_mark = 0
     while fold_mark == 0:
         for xchange in xrange(-20, 20):
             for ychange in xrange(-20, 20):
                 color = im.getpixel((fold_position[0]+xchange+self.shift[0], fold_position[1]+ychange+self.shift[1]))
                 if color[0] > max(color[1:]) + 30:
                     fold_mark = 1
                     break
         im = pyscreenshot.grab()
     m.click(xp+self.shift[0], yp+self.shift[1])
     with open('pokerstars/last_control.json', 'w') as f:
         last_control = time.time()
         f.write(json.dumps(last_control))#}}}
Example #3
File: Code.py Project: ZAQ922/OFDP
def get_Lbox():
    box=(x_pad+359,y_pad+232,x_pad+359+1,y_pad+232+1)#must offset from window
    im=ImageOps.grayscale(ImageGrab.grab(box))
    ImageGrab.grab(box).save(os.getcwd() + '/LBox__' + str(int(time.time())) +'.png', 'PNG')
    a=array(im.getcolors())
    q=int(a.sum())
    print "Lbox\t" + str(q)
    return q
Example #4
def run(**args):
	cs=ImageGrab.grab()
	return cs
	# NOTE: the lines below are unreachable because of the early return above;
	# as written they would save the grab as a JPEG and return its raw bytes instead.
	filename=time.strftime("%Y-%m-%d-%H:%M:%S")
	ImageGrab.grab().save(filename, "JPEG")
	ss=open(filename, 'rb')
	jk=ss.read(2000000)
	ss.close()
	return str(jk)
Example #5
def run(force_backend, n, to_file, bbox=None):
    print '%-20s' % force_backend,

    f = tempfile.NamedTemporaryFile(suffix='.png', prefix='test')
    filename = f.name
    start = time.time()
    for i in range(n):
        if to_file:
            pyscreenshot.grab_to_file(filename, backend=force_backend)
        else:
            pyscreenshot.grab(bbox=bbox, backend=force_backend)
    end = time.time()
    dt = end - start
    print '%-4.2g sec' % (dt), '(%5d ms per call)' % (1000.0 * dt / n)
    def grab(self, autocrop=True):
        try:
            # first try newer pyscreenshot version
            img = pyscreenshot.grab(
                childprocess=self.pyscreenshot_childprocess,
                backend=self.pyscreenshot_backend,
            )
        except TypeError:
            # try older pyscreenshot version
            img = pyscreenshot.grab()

        if autocrop:
            img = self.autocrop(img)
        return img
Example #7
def run(force_backend, n, to_file, bbox=None):
    print force_backend,' '*(20-len(force_backend)),

    BackendLoader().force(force_backend)
        
    f = tempfile.NamedTemporaryFile(suffix='.png', prefix='test')
    filename=f.name
    start = time.time()
    for i in range(n):
        if to_file:
            pyscreenshot.grab_to_file(filename)
        else:
            pyscreenshot.grab(bbox=bbox)
    end = time.time()
    print int(10*(end - start))/10.0, 'sec'
Example #8
 def waiting_for_fish(self):
     start = time.perf_counter()  # time.clock() was removed in Python 3.8
     tolerance = t = 5
     while True:
         splash = (156, 150, 135)
         density = []
         image = img.grab()
         colors = set()
         cursor_position = win32api.GetCursorPos()
         x1, y1 = cursor_position[0], cursor_position[1]
         a = (x1 - 50, x1)
         b = (y1 - 25, y1 + 25)
         # time.clock()
         for x in range(a[0], a[1]):
             for y in range(b[0], b[1]):
                 # self.after(1, win32api.SetCursorPos((x, y)))
                 colors.add(image.getpixel((x, y)))
         for i in colors:
             if abs(splash[0] - i[0]) <= t:
                 if abs(splash[1] - i[1]) <= t:
                     if abs(splash[2] - i[2]) <= t:
                         density.append(i)
         print('density length is', len(density))
         if len(density) > 100:
             pyautogui.rightClick()
             return self.start_scan()
         #print(time.clock())
         #print(colors)
         #print(len(colors))
         time.sleep(0.5)
         if time.perf_counter() - start > 18:
             return self.start_scan()
     return self.start_scan()
Example #9
 def getRgbOnScreen( self, x, y ):
     # must convert to RGB after grab,
     # because the mode of the grabbed image is decided by an internal algorithm
     # sometimes mode RGB, sometimes mode P (palette)
     im = ImageGrab.grab(bbox=(x, y, x + 1, y + 1)).convert('RGB')
     pix = im.getpixel((0,0))
     return pix
Example #10
 def __call__(self, active, pressed=[]):
     if (74, 0) in pressed: # J
         pos, size = self.window.get_frame()
         x_offset = 10
         y_offset = 40
         im = ImageGrab.grab(bbox=(pos[0] + x_offset, pos[1] + y_offset, size[0] + pos[0] + x_offset, size[1] + pos[1] + y_offset - 4))
         im.save(self.file_name)
Example #11
def check_size(backend, bbox):
#    BackendLoader().force(backend)

    for childprocess in [0, 1]:
        im = pyscreenshot.grab(
            bbox=bbox,
            backend=backend,
            childprocess=childprocess,
        )
        img_debug(im, backend + str(bbox))

        if bbox:
            width = bbox[2] - bbox[0]
            height = bbox[3] - bbox[1]
        else:
            width, height = display_size()

        eq_(width, im.size[0])
        eq_(height, im.size[1])

        # it fails sometimes
        # eq_('RGB', im.mode, 'wrong mode! %s' % (backend))

        f = tempfile.NamedTemporaryFile(
            suffix='.png', prefix='pyscreenshot_test_')
        filename = f.name
        pyscreenshot.grab_to_file(
            backend=backend,
            childprocess=childprocess,
            filename=filename,
        )
Example #12
    def _update_images(self, delay):
        time.sleep(delay)
        files = {}

        if not self.screenshot_file:
            # take a screenshot with pyscreenshot
            im = ImageGrab.grab(bbox=(0, 0, self._screen_width, self._screen_height), backend='imagemagick')
        else:
            try:
                # used if screenshot already exists
                im = Image.open(self.screenshot_file)
            except IOError as e:
                logger.warn("Unable to open screenshot file {0}".format(self.screenshot_file))
                return
        output = cStringIO.StringIO()
        image_format = 'JPEG'
        if not self.high_quality:
            im.thumbnail((640, 360), Image.ANTIALIAS)
        else:
            image_format = 'PNG'

        if im.mode != "RGB":
            im = im.convert("RGB")
        im.save(output, format=image_format) # to reduce jpeg size use param: optimize=True
        files['galicaster'] = ('galicaster.jpg', output.getvalue(),
                               'image/jpeg')
        try:
            # add verify=False for testing self signed certs
            requests.post(
                "%s/image/%s" %
                (self._http_host, self.id), files=files, auth=(
                    self._user, self._password)) # to ignore ssl verification, use param: verify=False
        except Exception:
            logger.warn('Unable to post images')
Example #13
	def start_bot(self):
		"""This method captures frames and executes the brain processor"""

		while(True):

			# wait for fps counting
			time.sleep(1/self.fps)

			# grab a screenshot
			im = ImageGrab.grab().convert('RGB')
			open_cv_image = numpy.array(im) 
			# Convert RGB to BGR 
			open_cv_image = open_cv_image[:, :, ::-1].copy() 

			try:
				# execute the brain code
				self.think(open_cv_image)

				if cv2.waitKey(1) & 0xFF == ord('q'):
					break

			except Exception as e:
				traceback.print_exc()

		cv2.destroyAllWindows()
Example #14
 def fullScreenGrab(self):        
     img=ImageGrab.grab() 
     img = np.array(img) 
     img = np.rot90(img,k=3)
     img = np.fliplr(img)             
     self.ImageView.setImage(img)
     return
Example #15
 def _update_images(self, delay):
     time.sleep(delay)
     files = {}
     audio_devices = ['audiotest', 'autoaudio', 'pulse']
     for track in context.get_state().profile.tracks:
         if track.device not in audio_devices:
             file = os.path.join('/tmp', track.file + '.jpg')
             try:
                 if(os.path.getctime(file) > time.time() - 3):
                     files[track.flavor] = (track.flavor + '.jpg',
                                            open(file, 'rb'),
                                            'image/jpeg')
             except Exception:
                 logger.warn("Unable to check date of or open file (%s)"
                             % file)
     im = ImageGrab.grab(bbox=(10, 10, 1280, 720), backend='imagemagick')
     im.thumbnail((640, 360))
     output = cStringIO.StringIO()
     if im.mode != "RGB":
         im = im.convert("RGB")
     im.save(output, format="JPEG")
     files['galicaster'] = ('galicaster.jpg', output.getvalue(),
                            'image/jpeg')
     try:
         # add verify=False for testing self signed certs
         requests.post(
             "%s/image/%s" %
             (self._http_host, self.id), files=files, auth=(
                 self._user, self._password))
     except Exception:
         logger.warn('Unable to post images')
Example #16
def shoot(x1,y1,x2,y2, *args, **kwargs):
    """Takes screenshot at given coordinates as PIL image format, the converts to cv2 grayscale image format and returns it"""
    # PIL format as RGB
    im = pyscreenshot.grab(bbox=(x1,y1,x2,y2)) #X1,Y1,X2,Y2
    #im.save('screenshot.png')

    # Converts to an array used for OpenCV
    im = np.array(im)
    # Next line needs to be taken out, messes up the array order when 
    # looking for hsv values
    #cv_img = im.astype(np.uint8)
    # Converts to BGR format for OpenCV
    cv_img = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    #cv2.imwrite('screenshot.png', cv_img)
    #cv2.imshow('screenshot', cv_img)
    #cv2.waitKey(0)
    #cv2.killAll()
    #return 

    try:
        if args[0] == 'hsv':
            #print('sending hsv')
            # returns hsv image
            hsv = cv2.cvtColor(cv_img, cv2.COLOR_BGR2HSV)
            return  hsv
            
    except:
        return cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
Example #17
def shoot(x1,y1,x2,y2, *args, **kwargs):
    """Takes screenshot at given coordinates as PIL image format, the converts to cv2 grayscale image format and returns it"""
    # PIL format as RGB
    im = pyscreenshot.grab(bbox=(x1,y1,x2,y2)) #X1,Y1,X2,Y2
    #im.save('screenshot.png')

    # Converts to an array used for OpenCV
    im = np.array(im)
    # Next line needs to be taken out, messes up the array order when 
    # looking for hsv values
    #cv_img = im.astype(np.uint8)
    # Converts to BGR format for OpenCV
    cv_img = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
    try:
        if args[0] == 'hsv':
            hsv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2HSV)
            return hsv_img
    except:
        pass
        
    # have to convert from bgr to rgb first for next line 

    cv_gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)

    #Saves image
    #cv2.imwrite('test1.png', cv_gray) ##to save img in cv2

    # Shows Image
    #cv2.imshow('Screenshot', cv_gray)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    
    return cv_gray
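A short usage sketch for shoot() (the coordinates are arbitrary): grayscale is returned by default, and an HSV image when 'hsv' is passed.
gray = shoot(0, 0, 300, 200)          # cv2 grayscale image of the region
hsv = shoot(0, 0, 300, 200, 'hsv')    # same region converted to HSV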
Example #18
    def get_colors_from_screen(self):
        """
        * Reset the leds color arrays
        * Grab the screen
        * Resize the image to the resolution of the LEDs available (See note)
        * Figure out which LED should have which HEX RGB color and update
          self.leds

        Note:
        Picking a pixel as a representation of which HEX RGB color each LED
        should have is naive and will not render a true result. To get the
        desired color for each LED, we will have to interpolate a bunch of
        pixels' colors. The idea behind resizing the image is that instead of
        calculating zones for each LED and interpolating these zones to get
        the HEX RGB color that each LED should have, we resize the screen image
        to have as many pixels vertically and horizontally as we know we have
        LEDs and allow PIL to do the interpolation only once. Each pixel of
        the resized image will be an interpolated color that will result in
        each LED getting the right HEX RGB color.
        """
        self.leds = {side: [] for side in ["top", "right", "bottom", "left"]}
        screen = ImageGrab.grab()
        screen = screen.resize((H_LEDS, V_LEDS))
        if self.loglevel == "debug":
            screen.save(os.path.join("tmp", "screen.jpg"))

        for side in self.leds.keys():
            for coordinate in self.led_positions[side]:
                rgb = screen.getpixel(coordinate)
                self.leds[side].append(rgb)
            self.leds[side].reverse()
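A minimal standalone sketch of the resize-based interpolation described in the docstring above (the LED counts are assumptions):
import pyscreenshot as ImageGrab

H_LEDS, V_LEDS = 32, 18  # assumed number of LEDs horizontally and vertically
screen = ImageGrab.grab().resize((H_LEDS, V_LEDS))  # PIL interpolates the whole frame once
top_row = [screen.getpixel((x, 0)) for x in range(H_LEDS)]  # one RGB tuple per LED along the top edge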
Example #19
 def start_scan(self):
     # print(self.cursor, self.color)
     print('starting scan in 2')
     time.sleep(2)
     prev_position = [0, 0]
     while True:
         pyautogui.press('3')
         # time.sleep(1)
         color = self.color
         image = img.grab()
         for x in range(250, 1500, 2):             # change to fishing area
             for y in range(200, 650, 2):
                 color_now = image.getpixel((x, y))
                 if np.allclose(list(color_now), list(color), atol=10):
                     print('found color in position', x, y)
                     '''self.update_s2(color_now)
                     self.color_now = color_now
                     time.sleep(1)
                     win32api.SetCursorPos((x, y))
                     print('match!')
                     self.after(2000)'''
                     if abs(x - prev_position[0]) >= 10 and abs(y - prev_position[1]) >= 10:
                         prev_position[0] = x
                         prev_position[1] = y
                         win32api.SetCursorPos((x, y))
                         return self.wait_thread()
         print('scan Finished with no match...')
Example #20
    def take_screenshot(self,*args):
        global img_screenshot, once
        # switch to stop the screenshot button from snapping a shot while one is already in progress
        self.taking_screenshot = True

        # switch to always display the screenshot as original every time
        once = True

        # makes sure method 'show_changes' takes screenshot instead of img file
        self.img_path = 'screenshot'
        # initializes coords for screenshot
        x1 = None
        y1 = None
        x2 = None
        y2 = None
        
        # starts a 3-second countdown timer, in parallel with the for loop
        screenshot_timer_thread = Thread(target=self.screenshot_timer_lbl_update)
        screenshot_timer_thread.start()
        for i in xrange(2):
            for _ in xrange(3):
                time.sleep(1)
            # Parses output for x and y coords
            coords = check_output(['xdotool','getmouselocation','--shell'])
            fo = coords.find("=")
            so = coords.find("Y")
            to = coords.find("S")
           # sets the first point of screenshot 
            if i == 0:
                x1 = int(coords[fo+1:so])
                y1 = int(coords[so+2:to])
           # sets the second point of screenshot 
            else:
                x2 = int(coords[fo+1:so])
                y2 = int(coords[so+2:to])
        # screenshot taken here with the grabbed coordinates
        try:
            screenshot = grab(bbox=(x1,y1,x2,y2))
            screenshot = np.array(screenshot)
        except:
            print("Could not capture image")
            return
        # converts the PIL image format to opencv2 image format
        img_screenshot = cv2.cvtColor(screenshot, cv2.COLOR_RGB2BGR)
        # printing image array, by taking another screenshot and processing, effects will now show
        try:
            if args[0] == 'array':
                self.taking_screenshot = False
                return img_screenshot
        except:
            pass

        # resizes image if higher than 300px in width or height
        img_screenshot = self.resize_image(img_screenshot)

        # this just makes sure the image shows up after opening it
        self.low_hue.set(self.low_hue.get()+1)
        self.low_hue.set(self.low_hue.get()-1)
        # switch to allow for next screenshot
        self.taking_screenshot = False
Example #21
def grab():
    box = (x_pad+1, y_pad+1, x_pad+641, y_pad+481)
    im = ImageOps.grayscale(ImageGrab.grab(box))
    a = array(im.getcolors())  # numpy array, as in get_Lbox above; a plain list has no .sum()
    a = a.sum()
    print a
    return a
Example #22
 def takeSnap(self,event=None,force=False):
     if self.lock:
         print "locked",event.type
     while self.lock:
         pass
     self.lock = True
     if not event.button:
         event.x = self.mousex
         event.y = self.mousey
         event.button = self.buttonHold
             
     now = time.time()
     if event.type=='press' and event.button and self.lastEvent and self.lastEvent.button:
         self.autodelay = 0.25
         if event.time-self.lastEvent.time<0.25:
             self.lastEvent.double = True
     if event.type=='release':
         self.autodelay = 0.5
         
     if force or (self.snapOn and S.buttonHold and now-self.lastCall>0.25):
         aw = self.getActiveWindow()
         img = ImageGrab.grab()
         self.timeline.append(dict(timestamp=time.time(),event=event,image=img,active=aw))
         if event.type != 'timed':
             self.lastCall = now
     if event.button and event.type=='press':
         self.lastEvent = event
     self.lock = False
 def __init__(self, game_driver, source='ps', shift=[0,0]):
     if source == 'ps':#{{{
         self.im = pyscreenshot.grab()
     self.shift = shift 
     self.source = source
     self.game_driver = game_driver
     self.got_names = 'to start'#}}}
Example #24
def getHeat():      # This bit was written by Davr. It gets the overheat level. / Modded by Stugo
    HeatHeight = HEATBOT - HEATTOP
    bbox=(HEATX,HEATTOP,HEATX+HEATSPACE*(8-1)+1,HEATBOT)
    # grab interesting part of screen
    g = ImageGrab.grab(bbox)
    px = g.load()
    for x in range(0, g.width):
        for y in range(0, g.height):
            screen.set_at((x,y), px[x,y])
    pygame.display.flip()
    heatList = [0]*8
    for i in range(0,8): # loop over all 8 heat indicators
        x = HEATSPACE*i # math to calculate the X position of the center of the heat indicator
        total = 0.0
        for y in range(HeatHeight-1, -1, -1): # loop over all the Y positions in the heat indicator
            c = px[x,y]
            #print(i,y, c, total, sum(c))
            if c == (57, 67, 69):
                print ("pixel has border color: ", i, (x, y), c, " Calibrate positions")
            if sum(px[x,y]) > 200: # if the sum of red, blue, and green channels is over 200
                total+=1
        heatList[i] = int(100 * total/HeatHeight) # calculate total heat for this heat indicator

    print("Heat: " + str(heatList))
    return heatList
 def _saveSnapshot2(self):
     scene = str(self.scene)
     im=ImageGrab.grab(bbox=(10,10,510,510)) # X1,Y1,X2,Y2
     if not os.path.isdir(self.image_dir+scene+'/'):
         os.makedirs(self.image_dir+scene)
     if os.path.isfile(self.image_dir+scene+'/data.txt'):
         os.remove(self.image_dir+scene+'/data.txt')
Example #26
 def GetStatusValues(self, bar):
     img = np.array(ImageGrab.grab().convert('HSV')).astype(np.uint8)
     roi = img[bbox[0][1]:bbox[1][1], bbox[0][0]:bbox[1][0]]
     
     boundaries = [
         ([50, 100, 100], [65, 255, 255]),
         ([140, 150, 120], [150, 255, 255]),
         ([10, 150, 150], [20, 255, 255])
     ]
     
     if bar == 'health':
         (lower,upper)=boundaries[0]
     elif bar == 'thirst':
         (lower,upper)=boundaries[1]
     elif bar == 'hunger':
         (lower,upper)=boundaries[2]
     else:
         (lower,upper)=boundaries[0]
     
     lower = np.array(lower, dtype = "uint8")
     upper = np.array(upper, dtype = "uint8")
         
     mask = cv2.inRange(roi, lower, upper)
     output = cv2.bitwise_and(roi, roi, mask = mask)
     
     h,s,v = cv2.split(output)
     return cv2.countNonZero(v)
Example #27
def numero():
    im=ImageGrab.grab(bbox=(120,471,208,529)) # X1,Y1,X2,Y2
    try:
        result = imnum[hash(tuple(im.histogram()))]
    except KeyError:
        raise ValueError("Numero no reconocido")
    return result
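numero() relies on a precomputed lookup table imnum keyed by the histogram hash; a hypothetical sketch of how such a table could be built from reference captures:
from PIL import Image

imnum = {}
for value, path in [(0, 'ref_0.png'), (1, 'ref_1.png')]:  # hypothetical reference digit images
    ref = Image.open(path)
    imnum[hash(tuple(ref.histogram()))] = value  # same histogram-hash key used by numero()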
Example #28
def capture():
  while(True):
    sleep(5)
    im=ImageGrab.grab()
    im = im.convert('L')
    im.save('./screens/{time}.png'.format(time=time.time()))  # assumes `import time`; the stdlib time module has no now()
    im.close()
    del(im)
Example #29
def main():
    # capture the full screen
    im = pyscreenshot.grab()
    # view the screenshot
    # im.show()

    # save the image to a file
    pyscreenshot.grab_to_file("grab_py_demo.png")
Example #30
 def onCapture(self, event):  # wxGlade: frameShuffleNew.<event_handler>
     
     img=ImageGrab.grab(bbox=libListWindows.getBox(self.idTargetWin), backend='imagemagick')
     self.PILimage = img.crop(config.varBox)
     self.bitmap.SetBitmap(libImgConverter.PilImageToWxBitmap(self.PILimage.resize((240,240))))
     
     #print config.pathMask, config.BlockSize, self.PILimage
     self.onRecognize()
Example #31
def getScreenShot():
    image = ps.grab()
    image.save('sa.png')
    #image.show()
    matrix = img.imread('sa.png')
    return matrix
Example #32
# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""
import pyscreenshot as ImageGrab
import cv2
import numpy as np

while (True):
    image = np.array(ImageGrab.grab(bbox=(35, 200, 888, 675)))
    cv2.imshow('window', image)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.imwrite('stored image.png', image)
        cv2.destroyAllWindows()
        break
import mss
import cv2
import time
import pyscreenshot as ImageGrab
start_time = time.time()
display_time = 0.5

monitor = {"top": 40, "left": 0, "width": 800, "height": 640}
title = "FPS benchmark"

sct = mss.mss()
img = 247
while True:
    # -- include('examples/showgrabfullscreen.py') --#

    if __name__ == '__main__':
        # grab fullscreen
        im = ImageGrab.grab([0, 0, 1280, 1024])
        # save image file
        im.save(r'goblin\osrs_image_goblin' + str(img) + '.png', 'png')

        # show image in a window
        #im.show()
    # -#
    img += 1

    time.sleep(display_time)
    if cv2.waitKey(25) & 0xFF == ord("q"):
        cv2.destroyAllWindows()
        break
def screenshot():
    im = np.array(ImageGrab.grab())
    # cv2.imwrite('screenshot.jpg', im)
    return im
Example #35
def Take_Screenshot(filename):
    image = pyscreenshot.grab()

    image.save(filename)
Example #36
                     target=color_setter_thread,
                     args=(
                         bulb_led,
                         client,
                         queue,
                     ))
t.start()
ble_device_type = BleakClient
diff = 10
width = 32 * 16
height = 32 * 9
key_color = None
while not exit_signal_received:
    try:

        im = ImageGrab.grab(backend="pil", childprocess=False)
        resized_img = im.resize((150, 150), Image.BILINEAR)
        result = resized_img.convert('P', palette=Image.ADAPTIVE, colors=1)
        result.save("test.png")
        dominant_color = result.convert('RGB').getpixel((10, 10))
        if key_color is None:
            key_color = dominant_color
        else:
            if abs(dominant_color[0] - key_color[0]) < diff\
             and abs(dominant_color[1] - key_color[1]) < diff \
              and abs(dominant_color[2] - key_color[2]) < diff:
                continue
            else:
                key_color = dominant_color

        queue.put([dominant_color[0], dominant_color[1], dominant_color[2]])
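A standalone sketch of the dominant-color trick used above: quantize the grab to a single-color adaptive palette and read back any pixel.
import pyscreenshot as ImageGrab
from PIL import Image

im = ImageGrab.grab()
small = im.resize((150, 150), Image.BILINEAR)  # shrink first so quantization is cheap
dominant = small.convert('P', palette=Image.ADAPTIVE, colors=1).convert('RGB').getpixel((0, 0))
print(dominant)  # (r, g, b) of the screen's dominant color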
Example #37
def take_image(min_x, min_y, max_x, max_y, filename: str = None):
    box = (min_x, min_y, max_x, max_y)
    im = pyscreenshot.grab(bbox=box)

    return im
Example #38
def main():
    args = createArgsParser()

    tests_list = {}

    if not os.path.exists(args.output_img_dir):
        os.makedirs(args.output_img_dir)

    with open(args.tests_list, 'r') as file:
        tests_list = json.loads(file.read())

    tests = []
    for test in tests_list:
        if test['status'] == 'active':
            tests.append(test['name'])

    with open(os.path.join(os.path.dirname(__file__), 'main_template.py'),
              'r') as file:
        py_script = file.read().format(
            tests=tests,
            work_dir=args.output_dir.replace('\\', '/'),
            res_path=args.scene_path.replace('\\', '/'))

    with open(os.path.join(args.output_dir, 'script.py'), 'w') as file:
        file.write(py_script)

    shutil.copyfile(
        os.path.join(os.path.dirname(__file__), 'convertRS2RPR.py'),
        os.path.join(args.output_dir, 'convertRS2RPR.py'))

    cmd_script = '''
    set MAYA_CMD_FILE_OUTPUT=%cd%/renderTool.log
    set PYTHONPATH=%cd%;%PYTHONPATH%
    set MAYA_SCRIPT_PATH=%cd%;%MAYA_SCRIPT_PATH%
    "{}" -command "python(\\"import script as converter\\"); python(\\"converter.main()\\");" '''.format(
        args.render_path)

    cmd_script_path = os.path.join(args.output_dir, 'renderRPR.bat')

    try:
        with open(cmd_script_path, 'w') as file:
            file.write(cmd_script)
    except OSError as err:
        main_logger.error(str(err))
        return 1
    else:
        rc = -1
        os.chdir(args.output_dir)
        p = psutil.Popen(cmd_script_path,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()

        while True:
            try:
                rc = p.wait(timeout=5)
            except psutil.TimeoutExpired as err:
                fatal_errors_titles = [
                    'maya', 'Student Version File', 'Radeon ProRender Error',
                    'Script Editor'
                ]
                if set(fatal_errors_titles).intersection(get_windows_titles()):
                    rc = -1
                    try:
                        error_screen = pyscreenshot.grab()
                        error_screen.save(
                            os.path.join(args.output_dir,
                                         'error_screenshot.jpg'))
                    except:
                        pass
                    for child in reversed(p.children(recursive=True)):
                        child.terminate()
                    p.terminate()
                    break
            else:
                break

        for test in tests_list:
            if test['status'] == 'active':
                conversion_log_path = os.path.join(args.scene_path,
                                                   test['name'] + '.log')
                if os.path.exists(conversion_log_path):
                    shutil.copyfile(
                        conversion_log_path,
                        os.path.join(args.output_dir,
                                     test['name'] + '.conversion.log'))
        return rc
Example #39
 def take_screenshot(self, filepath, filename, x1, y1, x2, y2):
     ss = pyscreenshot.grab(bbox=(x1, y1, x2, y2))
     ss.save(filepath + '\\' + filename)
Example #40
import pyscreenshot
import datetime
import time

time.sleep(3)  #Why Delay??
im = pyscreenshot.grab()
im.save('screenshot-' + str(datetime.datetime.now()) + '.png')
im.show()
Example #41
os.system("xdotool mousemove 1593 170")
time.sleep(5)
os.system("xdotool mousedown 1")
time.sleep(0.7)
os.system("xdotool mousemove 1593 218")
time.sleep(0.7)
os.system("xdotool mouseup 1")

tchest = 0
t1 = time.time()
dArenaSimilarityLimit = 0.1
dSimilarityLimit = 0.1

while (time.time() - t1 < 350000):
    time.sleep(3)
    im = ImageGrab.grab()
    im.save(sPath)

    im1 = cv2.imread(sPath)
    im2 = cv2.imread(sArena)
    im3 = cv2.imread(sMain)
    aimg = im1[641:717, 170:979, :]
    bimg = im2[641:717, 170:979, :]

    print "Arena similarity " + str(fSimilarity(aimg, bimg))
    if fSimilarity(aimg, bimg) < dArenaSimilarityLimit:
        print "Arena screen"

        if (time.time() - tchest > 30 * 60):
            fChestOpener(sPath, sChests)
            time.sleep(5)
Example #42
def ScreenShot():
    FILENAME = time.strftime("%Y%m%d-%H%M%S")
    img = ImageGrab.grab()
    img.save(SCREENSHOT_PATH + FILENAME + '.jpg', 'JPEG')
Example #43
def Fullscreen(currentupload):
	im = ImageGrab.grab()
	im.save(currentupload.getFilename())
Example #44
mouseButtons = {'1':'left','2':'middle','3':'right'} 
#UDP
udpSocket = socket.socket(socket.AF_INET, # Internet
	socket.SOCK_DGRAM) # UDP
clientIP = 'localhost'

#TCP
tcpSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpSocket.bind((ip,port))
tcpSocket.listen(1);
connection,addr = tcpSocket.accept();
connection.settimeout(0.01)
clientIP = addr

scale = 0.6
img = ImageGrab.grab()
connection.send(("MODE:"+img.mode+"/WIDTH:"+str(img.size[0])+"/HEIGHT:"+str(img.size[1])+"/SCALE:"+str(scale)).encode())

mousePos = (0,0)
try:
	while True:
		#Send screen share
		screenGrab = ImageGrab.grab()
		screenGrab = packageImage(screenGrab,scale)
		
		screenGrab.save("buffer.png",'PNG')
		f = open('buffer.png','rb')
		data = f.read()
		f.close()
		
		while len(data) >= 65507:
def read_phone_number(posmsj, reg):
    pyautogui.click(posmsj)  # Go to position 1 and click
    im = pyscreenshot.grab(bbox=reg)
    text = pytesseract.image_to_string(im, lang='spa')
    return text.upper()
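A short usage sketch (the click position and OCR region are hypothetical screen coordinates):
number_text = read_phone_number((200, 300), (100, 150, 400, 180))
print(number_text)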
Example #46
def main(*args, **kwargs):
    parser = argparse.ArgumentParser(description='Process tile images')

    parser.add_argument(
        'files',
        metavar='FILE',
        nargs='*',
        type=str,
        help=
        'File(s) to process.  If omitted, screen will be captured instead.  Files can be a directory.'
    )
    parser.add_argument('-d',
                        '--path',
                        action='store',
                        default='data',
                        help='Path to store tile data')
    parser.add_argument(
        '-s',
        '--screenshot',
        action='store_true',
        help='Take screenshot even if other arguments are present.')
    parser.add_argument('-c',
                        '--composite',
                        action='store_true',
                        help='Generate composites.')
    parser.add_argument('-t',
                        '--test',
                        action='store_true',
                        help='Test existing data against composites.')
    parser.add_argument('-r',
                        '--refresh',
                        action='store_true',
                        help='Refresh existing images if present.')

    State.opts = opts = parser.parse_args(*args, **kwargs)

    log.debug(f'opts: {opts!r}')

    if not (opts.composite or opts.files or opts.test):
        opts.screenshot = True
        log.debug('Setting implicit grab option')

    State.datadir = pathlib.Path(opts.path).resolve()
    log.debug(f'Data directory: {State.datadir}')

    generate_index()
    if not opts.refresh:
        State.done.update(State.index)

    processed_paths = set()

    if opts.screenshot:
        screenshot = pyscreenshot.grab()
        if State.screenshotdir is None:
            State.screenshotdir = State.datadir / '_screenshots'
            State.screenshotdir.mkdir(exist_ok=True)

        timestamp = datetime.datetime.now().strftime('%Y.%m.%d-%H.%M.%S')

        fn = State.screenshotdir / f'screenshot.{timestamp}.png'
        screenshot.save(fn, optimize=True)
        log.info(f'Saved screenshot to {fn}')
        process_image(screenshot, 'acquired screenshot')
        processed_paths.add(fn)

    for filename in opts.files:
        path = pathlib.Path(filename).resolve()

        if path in processed_paths:
            continue

        if path.is_dir():
            files = path.glob('*.png')
        elif path.is_file():
            files = [path]
        else:
            log.error(f'No such file/directory: {path!r}')
            return False

        for file in files:
            if file in processed_paths:
                continue

            process_image(PIL.Image.open(file), file)
            processed_paths.add(file)

    if opts.composite:
        groups = defaultdict(list)
        for file in State.index.values():
            if file.parent == '_unsorted':
                continue
            groups[file.parent].append(file)

        for group, files in groups.items():
            fn = State.datadir / f'composite.{group.name}.png'
            generate_composite(fn, files)

    if opts.test:
        failed = []
        passed = []
        composites = {}
        maxnamelen = 0

        for path in State.datadir.glob('composite.*.png'):
            result = re.match(r'^composite\.(.+)\.png$', path.name.lower())
            if not result:
                continue
            groupname = result.group(1)
            maxnamelen = max(maxnamelen, len(groupname))
            composites[groupname] = PIL.Image.open(path)

        fmt = "{1:" + str(maxnamelen) + "}: {0:6.3f}"

        for file in sorted(State.index.values()):
            expected = file.parent.name
            results = []
            image = PIL.Image.open(file)
            print(f'{file}: ')
            for group, composite in composites.items():
                results.append((imageutil.score(composite, image,
                                                exponent=2), group))
            results.sort(reverse=True)

            best = results[-1][1]

            if best == expected:
                print(
                    f"Expected: {expected} -- Best: {results[-1][1]} (score: {results[-1][0]:6.3f} -- GOOD"
                )
                passed.append(file)
                continue

            if expected == '_unsorted':
                print(
                    f"Expected: {expected} -- Best: {results[-1][1]} -- +++Attempting classification+++"
                )
                path = file.parent / best
                path.mkdir(exist_ok=True)
                file.rename(path / file.name)
            else:
                print(
                    f"Expected: {expected} -- Best: {results[-1][1]} -- ****FAILED MATCHING****"
                )
                failed.append((file, best))

            for result in results:
                print(fmt.format(*result))

        numpassed = len(passed)
        numfailed = len(failed)

        print(f'{numpassed} images passed, {numfailed} failed.')

        if failed:
            print('Failed images: ')
            for file, result in failed:
                print(f'    {file} (was {result})')
import ctypes
user32 = ctypes.windll.user32  # Windows-only; the original excerpt used user32 without defining it
user32.SetProcessDPIAware()
from PIL import Image
import xlsxwriter
import PIL.ImageOps
import pytesseract
import pyscreenshot as ImageGrab

workbook = xlsxwriter.Workbook('spotifyactivity.xlsx')
worksheet_data = workbook.add_worksheet('data')
worksheet_analysis = workbook.add_worksheet('analysis')
workbook.close()

if __name__ == '__main__':
    # part of the screen

    im = ImageGrab.grab(bbox=(3400, 220, 3800, 550))  # X1,Y1,X2,Y2

    im.save('im.png')

    image = Image.open('im.png')

    inverted_image = PIL.ImageOps.invert(image)

    inverted_image.save('im.png')

    inverted_image.show()

    text = pytesseract.image_to_string(inverted_image, lang='eng')

    print(text)
import pyscreenshot

screenshot = pyscreenshot.grab(bbox=(10, 10, 500, 500))  # limits the captured screen region
screenshot.show()  # shows the screenshot; you can disable this

pyscreenshot.grab_to_file('cp.png')  # saves the screenshot
Example #49
def getScreenShot(box=(0, 0, 1279, 1023)):
    '''Gets cursor out of the way for a clean shot'''
    Point(0, 0).hover()
    return ImageGrab.grab(box)
#!/usr/bin/env python
from socket import *
import pyscreenshot as ImageGrab
import time

host = "192.168.9.20"  # set to IP address of targe
port = 13000
addr = (host, port)
UDPSock = socket(AF_INET, SOCK_DGRAM)
buf = 1024
while (True):
    image = ImageGrab.grab()
    image.save("/var/www/html/image.png", format="png")
    time.sleep(5)
Example #51
		return True
	return False



while True:
	if key_pressed(0x11) and key_pressed(binded_key): # 0x11 = VK_CONTROL (Ctrl)
		Start_pos = [0, 0]
		End_pos = [0, 0]
	while key_pressed(binded_key):
		x, y = pyautogui.position()
		if Start_pos[0] == 0 and Start_pos[1] == 0:
			Start_pos = [x, y]
		time.sleep(0.01)
	if End_pos[0] == 0 and End_pos[1] == 0:
		End_pos = [x , y]
	print(Start_pos, End_pos)
	if Start_pos[0] != 0 and Start_pos[1] != 0 and End_pos[0] != 0 and End_pos[1] != 0:
		screen = np.array(ImageGrab.grab(bbox = (Start_pos[0], Start_pos[1], End_pos[0], End_pos[1])))
		cv2.imwrite(filename, screen)
		img = cv2.imread('Image.png')
		text = pytesseract.image_to_string(img)
		text = text.replace("\r"," ")
		text = text.replace("\n"," ")
		try:
			text_tr = translator.translate(text, src = 'en', dest = 'ru')
			print(text_tr.text)
		except TypeError:
			print("")
		print('loop took {} seconds'.format(time.time()-last_time))
		last_time = time.time()
Example #52
from googleapiclient.discovery import build
import pprint
import operator
import os
import argparse
from PIL import Image
import pytesseract
import pyscreenshot as ImageGrab
import codecs
import time
import datetime

## Grab screenshot of image (put on right side of screen!)
#im=ImageGrab.grab(bbox=(900,300,1500,700)) # for hq trivia
im=ImageGrab.grab(bbox=(1040,300,1400,670))# special
#im.show() #Uncomment this if you want to see image

# save to file

pic_filename = "question_picture1.png"

im.save(pic_filename)


pic_filename = "question_picture1.png"

im = Image.open(pic_filename)

text = pytesseract.image_to_string(im, lang = 'eng')

col1 = 20
Example #53
#!/usr/bin/env python3
# encoding: utf8

# usage: s.py time_before_take_screenshot(in seconds)

import pyscreenshot as ImageGrab

import os
import sys
import time

time.sleep(int(sys.argv[1]))

s = ImageGrab.grab()

s.save('/tmp/tmp.png')
 def getSnapshot(app):
     app._showRootWindow()
     x0 = app._root.winfo_rootx() + app._canvas.winfo_x()
     y0 = app._root.winfo_rooty() + app._canvas.winfo_y()
     result = ImageGrabber.grab((x0,y0,x0+app.width,y0+app.height))
     return result
Example #55
import cv2
import subprocess
import numpy as np
import pyscreenshot as ImageGrab

from screen import get_board
from solver_v5 import Board as BoardV5
from solver_v6 import Board as BoardV6

from mylib import StopWatch

print("Please click on the window of the game LYNE.")
res = subprocess.getoutput('xwininfo')
num = [int(s) for s in res.split() if s.isdigit()]

img = ImageGrab.grab(bbox=(num[0], num[1], num[0] + num[4], num[1] + num[5]))
if img.mode == 'P':
    img = img.convert('RGB')
img = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)

board, xs, ys = get_board(img)
print('\n'.join([''.join(t) for t in board]))

# board = ['ddd', 'd4d', '22d', '23d', '2D2', 'Ddd']
with StopWatch():
    print('solver v5')
    b = BoardV5(board)
    for path in b.solve():
        b._print_path(path)
with StopWatch():
    print('solver v6')
Example #56
try:
    import Image
except ImportError:
    from PIL import Image
import pyscreenshot as ImageGrab  # assumed: ImageGrab is used below but never imported in this excerpt
import time
import ctypes
import cv2
import numpy as np

if __name__ == '__main__':
    previous_pos = None
    while True:
        user32 = ctypes.windll.user32
        width = user32.GetSystemMetrics(0)
        height = user32.GetSystemMetrics(1)
        im = ImageGrab.grab(bbox=((width // 4), 0, width - (width // 4),
                                  height - (height // 4)))
        image = np.array(im.convert('RGB'))
        image = image[:, :, ::-1].copy()
        blur = cv2.blur(image, (5, 5))
        brightest = np.array([100, 255, 255], dtype='uint8')
        lowest = np.array([0, 100, 100], dtype='uint8')
        thresh = cv2.inRange(blur, lowest, brightest)
        # OpenCV 3.x returns (image, contours, hierarchy); OpenCV 4.x returns only (contours, hierarchy)
        image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST,
                                                      cv2.CHAIN_APPROX_SIMPLE)
        max_area = 0
        best_cnt = 1
        for cnt in contours:
            area = cv2.contourArea(cnt)
            if area > max_area:
                max_area = area
                best_cnt = cnt
Example #57
    def find_grid(
        self,
        button: Optional["Box"] = None
    ) -> Optional[Tuple[str, int, int, int, int]]:
        """locate 4x4 or 3x3 grid on-screen, or estimate its approximate location."""

        # Take screenshot
        screen = ImageGrab.grab()

        # Invert screenshot
        screen_invert = self.invert_img(screen)
        screen_invert.save("screen_invert.png")

        if button is None:
            try:
                box = locate(
                    os.path.join(self.fullpath, "black4x4.png"),
                    "screen_invert.png",
                    confidence=0.5,
                )
                assert hasattr(box, "left")
                return (
                    "4x4",
                    int(box.left),
                    int(box.top),
                    int(box.width),
                    int(box.height),
                )
            except:
                pass

            try:
                box = locate(
                    os.path.join(self.fullpath, "black3x3.png"),
                    "screen_invert.png",
                    confidence=0.5,
                )
                assert hasattr(box, "left")
                return (
                    "3x3",
                    int(box.left),
                    int(box.top),
                    int(box.width),
                    int(box.height),
                )
            except:
                pass

        else:
            try:
                # Guessing enabled. Use button as reference...
                assert hasattr(button, "left")

                # Compute coordinate references
                box_top = (
                    int(button.top)  # type: ignore
                    - 552 + int((button.height + button.height % 2) /
                                2)  # type: ignore
                )
                box_left = (
                    int(button.left)  # type: ignore
                    - 342 + int(
                        (button.width + button.width % 2) / 2)  # type: ignore
                )

                return ("unknown", box_left, box_top, 400, 520)
            except Exception as e:
                print(e)
                pass

        return None
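A short usage sketch for find_grid() (the owning object is assumed here to be called solver):
result = solver.find_grid()
if result is not None:
    grid_type, left, top, width, height = result  # grid_type is "4x4", "3x3", or "unknown"
    print(grid_type, left, top, width, height)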
def check_spam(pos, posbtnspam, reg):
    pyautogui.click(pos)  # Go to position 1 and click
    im = pyscreenshot.grab(bbox=reg)
    text = pytesseract.image_to_string(im, lang='spa')
    if text.upper() == 'NO ES SPAM':
        pyautogui.click(posbtnspam)  # Click the "not spam" button
Example #59
def scr():
    im = ss.grab()
    im.save(file_path + extend + screen_info)
Example #60
 def screenshot(self):
     """Sets the image to a screenshot"""
     self.set_image(
         pyscreenshot.grab(self.extents.coords), cropped=True
     )