def main():
    # countdown from 5 before capture starts
    for i in range(5, -1, -1):
        print('Starting in {}'.format(i))
        time.sleep(1)

    initial_time = time.time()

    # main loop
    while True:
        # grabbing the screen
        screen = grab_screen([10, 40, 770, 700])
        # converting to gray
        screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
        # getting the region of interest
        roi = screen[300:, 100:]
        # resizing the region of interest to (480, 270)
        roi = cv2.resize(roi, (480, 270))
        # checking the keystroke
        key = key_check()
        # getting the one hot encoding of the key
        key = oneHotEncoding(key)
        # appending the frame together with its key stroke label
        training_data.append([roi, key])

        print('FPS: {}'.format(1 / (time.time() - initial_time)))
        initial_time = time.time()

        # every 200 samples, save the training data collected so far
        if len(training_data) % 200 == 0:
            print('Length is {}, now saving the data'.format(
                len(training_data)))
            np.save(trainFile, training_data)
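
# Hedged sketch (not part of the example above): one way to read the samples
# written by np.save, assuming each entry is [frame, one_hot_key]. The file
# name 'trainFile.npy' is an assumption based on the variable used above.
import numpy as np

data = np.load('trainFile.npy', allow_pickle=True)
X = np.array([frame for frame, _ in data])   # stacked grayscale frames
y = np.array([key for _, key in data])       # one-hot key labels
print(X.shape, y.shape)
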
def main():
    global do
    while True:
        frame = grab_screen([10, 40, 770, 700])
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        roi = frame[300:,100:]
        roi = cv2.resize(roi, (80,60))
        temp = roi  # keep an unprocessed copy for display
        roi = img_preProcess(roi)
        if do:
            prediction = predict(roi)
            prediction = int(prediction.argmax(axis=1)[0])  # index of the most likely action
            
            if prediction == 0:
                right()
            elif prediction == 1:
                jump()
            elif prediction == 2:
                rightJump()
        
            print(prediction)

        temp = cv2.resize(temp, (320,240))
        cv2.imshow('frame', temp)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
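

# Hedged sketch of the img_preProcess helper assumed in the example above:
# normalise the 80x60 grayscale ROI and add batch/channel axes so it matches a
# typical Keras CNN input. The exact preprocessing in the original is unknown.
import numpy as np

def img_preProcess(roi):
    roi = roi.astype('float32') / 255.0   # scale pixel values to [0, 1]
    return roi.reshape(1, 60, 80, 1)      # (batch, height, width, channels)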
Example #3
def main():
    file_name = 'training_data_takeoff'
    training_data = load_data(file_name)

    paused = False

    print('Initialising Data Collection in:')
    countdown()
    while True:
        if not paused:
            try:
                screen = grab_screen()
                screen = cv2.resize(screen, (240, 135),
                                    interpolation=cv2.INTER_AREA)
                output = keys_to_output(key_check())
                print(f'Output is : {output}')
                training_data.append([screen, output])
                if len(training_data) % 1000 == 0:
                    print(f'Length of training data is : {len(training_data)}')
                    np.save(file_name, training_data)
            except Exception:
                print("Window not available. Please try again")

        keys = key_check()
        if 'T' in keys:
            if paused:
                countdown()
                paused = False
                print('Unpaused!')
                time.sleep(1)
            else:
                print('Pausing!')
                paused = True
                time.sleep(1)
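

# Hedged sketches of two helpers assumed in the example above. countdown()
# just waits a few seconds before capture starts; keys_to_output() turns the
# pressed keys into a one-hot label. The key mapping below is an assumption,
# not taken from the original project.
import time

def countdown(seconds=5):
    for i in range(seconds, 0, -1):
        print(i)
        time.sleep(1)

def keys_to_output(keys):
    # assumed layout: [W pressed, S pressed, no key]
    if 'W' in keys:
        return [1, 0, 0]
    if 'S' in keys:
        return [0, 1, 0]
    return [0, 0, 1]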
def feeding_img():
    global gaa
    while True:
        last_time = time.time()

        x, y = win32api.GetCursorPos()

        gaa = grab_screen([x - 250, y - 250, x + 250, y + 250])

        print("time update : {} seconds with FPS :  {}".format(
            time.time() - last_time, 1 / (time.time() - last_time)))
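

# Hedged usage sketch (not in the original): feeding_img() only updates the
# global `gaa`, so it is typically run in a background thread while another
# loop consumes the most recent frame. The consumer below is an assumption.
import threading
import cv2

gaa = None
threading.Thread(target=feeding_img, daemon=True).start()
while True:
    if gaa is not None:
        cv2.imshow('latest frame', gaa)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break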
Example #5
def main():
    file_name = 'training_data_takeoff'
    training_data = load_data(file_name)

    paused = False

    print('Initialising Data Collection in:')
    countdown()
    while True:
        if not paused:
            try:
                screen = grab_screen()
                screen = cv2.resize(screen, (240, 135),
                                    interpolation=cv2.INTER_AREA)  #resize
                output = keys_to_output(key_check())
                print(output)
                training_data.append([screen, output])

                if len(training_data) % 1000 == 0:
                    print(len(training_data))
                    np.save(file_name, training_data)

            except Exception:
                print("Window not available. Please try again")

        keys = key_check()
        if 'T' in keys:
            if paused:
                countdown()
                paused = False
                print('Unpaused!')
                time.sleep(1)
            else:
                print('Pausing!')
                paused = True
                time.sleep(1)
Example #6
def base(path):
    return os.path.basename(path)


# Hardcoded from model
class_names = [
    'apex', 'csgo', 'dbd', 'eft', 'fifa-21', 'fortnite', 'gtav', 'lol',
    'minecraft', 'poe', 'rocket-league', 'rust', 'valorant', 'warzone', 'wow'
]

# Load the trained model
model = tf.keras.models.load_model('models/trained-2k')

while True:
    # Use (0, 0, 1920, 1080) for center/main monitor (if 1080p)
    screen_raw = grab_screen(region=(-1920, 127, -340, 1016))
    # Resize to input into CNN
    screen = cv2.resize(screen_raw, (w, h))

    # Do something
    img_array = keras.preprocessing.image.img_to_array(screen)
    img_array = tf.expand_dims(img_array, 0)  # Create a batch

    # Predict
    s = time()
    prediction = model.predict(img_array)
    e = time()

    # Compute confidence
    score = tf.nn.softmax(prediction[0])
    pred = class_names[np.argmax(score)]
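    # Hedged follow-up (not in the original loop): report the predicted game,
    # the softmax confidence and the measured predict() latency.
    print('{} ({:.1f}% confidence, {:.0f} ms)'.format(
        pred, 100 * np.max(score), 1000 * (e - s)))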
Example #7
counterclick= 0
clicktimechecker = 0
screenx, screeny = 3840//2 , 2160//2
print("Alive")

while True:
	
	
	screengrab = np.array(grab_screen([screenx-250,screeny-250,screenx+250,screeny+250]))

	last_time = time.time()
	# testing time
	mask = cv2.inRange(cv2.cvtColor(screengrab, cv2.COLOR_BGR2HSV), lower_bound, upper_bound)
	result_Frame = cv2.bitwise_and(cv2.cvtColor( screengrab,cv2.COLOR_BGR2RGB),cv2.cvtColor( screengrab,cv2.COLOR_BGR2RGB),mask=mask)

	cv2.imshow("RESULT", result_Frame)
	cv2.imshow('nani',screengrab)
	
counterclick= 0
clicktimechecker = 0
x, y = 3840//2 , 2160//2
print("Alive")

while True:
	
	
	screengrab = np.array(grab_screen([x-250,y-250,x+250,y+250]))

	last_time = time.time()
	# testing time
	mask = cv2.inRange(cv2.cvtColor(screengrab, cv2.COLOR_BGR2HSV), lower_bound, upper_bound)
	result_Frame = cv2.bitwise_and(cv2.cvtColor( screengrab,cv2.COLOR_BGR2RGB),cv2.cvtColor( screengrab,cv2.COLOR_BGR2RGB),mask=mask)
	cur_image = image.img_to_array(result_Frame)
	cur_image = np.expand_dims(cur_image,axis=0)
	# about 0.002 sec
counterclick= 0
clicktimechecker = 0
sx, sy = 3840//2 , 2160//2
print("Alive")

while True:
	
	
	screengrab = np.array(grab_screen([sx-250,sy-250,sx+250,sy+250]))

	last_time = time.time()
	# testing time
	mask = cv2.inRange(cv2.cvtColor(screengrab, cv2.COLOR_BGR2HSV), lower_bound, upper_bound)
	result_Frame = cv2.bitwise_and(cv2.cvtColor( screengrab,cv2.COLOR_BGR2RGB),cv2.cvtColor( screengrab,cv2.COLOR_BGR2RGB),mask=mask)
	cur_image = image.img_to_array(result_Frame)
	cur_image = np.expand_dims(cur_image,axis=0)
	# about 0.002 sec
folder_creation("ClassifiedData")
folder_creation("ClassifiedData/hit")
folder_creation("ClassifiedData/miss")


while True:
	
	Lmouse = win32api.GetAsyncKeyState((win32con.VK_LBUTTON))
	Ctrl = win32api.GetAsyncKeyState((win32con.VK_CONTROL))
	Shift = win32api.GetAsyncKeyState((win32con.VK_SHIFT))

	x,y = win32api.GetCursorPos()

	print(x,y)
	screengrab = grab_screen([x-250,y-250,x+250,y+250])
	cv2.imshow('screen capture',cv2.cvtColor( np.array(screengrab),cv2.COLOR_BGR2RGB))

	last_time = time.time()
	if cv2.waitKey(1) & 0xFF == ord('q'):
		cv2.destroyAllWindows()
		break
	print("time update : {} seconds".format(time.time() - last_time))

	print(str(Lmouse) + "  " + str(Ctrl) + " " + str(Shift))

	if( Shift != 0):
Example #11
    Ctrl = win32api.GetAsyncKeyState((win32con.VK_CONTROL))
    Shift = win32api.GetAsyncKeyState((win32con.VK_SHIFT))
    MouseB, MouseF = win32api.GetAsyncKeyState(
        (win32con.VK_XBUTTON1)), win32api.GetAsyncKeyState(
            (win32con.VK_XBUTTON2))

    default_resolution_x = win32api.GetSystemMetrics(
        win32con.SM_CXVIRTUALSCREEN)
    default_resolution_y = win32api.GetSystemMetrics(
        win32con.SM_CYVIRTUALSCREEN)

    screengrab = grab_screen([
        default_resolution_x // 2 - 250, default_resolution_y // 2 - 250,
        default_resolution_x // 2 + 250, default_resolution_y // 2 + 250
    ])

    cv2.imshow('screen capture',
               cv2.cvtColor(np.array(screengrab), cv2.COLOR_BGR2RGB))
    mask = cv2.inRange(cv2.cvtColor(screengrab, cv2.COLOR_BGR2HSV),
                       lower_bound, upper_bound)
    result = cv2.bitwise_and(cv2.cvtColor(screengrab, cv2.COLOR_BGR2RGB),
                             cv2.cvtColor(screengrab, cv2.COLOR_BGR2RGB),
                             mask=mask)

    last_time = time.time()
    cv2.imshow("result", result)

    if cv2.waitKey(1) & 0xFF == ord('q'):
Example #12
def forward_right():
    PressKey(W)
    PressKey(D)
    ReleaseKey(A)
    ReleaseKey(S)
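

# Hedged sketch of the straight() helper called later in this example, written
# in the same PressKey/ReleaseKey style as forward_right(); the exact key
# combination used in the original project is an assumption.
def straight():
    PressKey(W)
    ReleaseKey(A)
    ReleaseKey(D)
    ReleaseKey(S)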


model = load_model('simpleCNN.h5')

# short countdown before the driving loop takes control of the game window
for i in list(range(4))[::-1]:
    print(i + 1)
    time.sleep(1)

while True:
    curr_view = grab_screen([0, 30, 800, 620])
    screen = cv2.cvtColor(curr_view, cv2.COLOR_BGR2GRAY)

    screen = cv2.resize(screen, (60, 60))
    cv2.imshow('screen', screen)
    screen = screen[np.newaxis, ..., np.newaxis]  # add batch and channel axes
    prediction = model.predict(screen)
    prediction = np.array(prediction)
    mode_choice = np.argmax(prediction)

    if mode_choice == 0:
        straight()
        choice_picked = 'straight'
import cv2
import numpy as np
from input import PressKey, Z, X, L, J, ReleaseKey
from screengrab import grab_screen
from encoding import oneHotEncoding
from getKeys import key_check
""" Right+JUMP(L+Z) = [0,0,1]
RIGHT (L) = [1,0,0]
JUMP (Z) = [0,1,0] """
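
# Hedged sketch of the imported oneHotEncoding helper, following the mapping in
# the docstring above; the real implementation in encoding.py may differ.
def one_hot_encoding_sketch(keys):
    if 'L' in keys and 'Z' in keys:
        return [0, 0, 1]   # RIGHT + JUMP
    if 'Z' in keys:
        return [0, 1, 0]   # JUMP
    return [1, 0, 0]       # RIGHT (also the fallback here)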

while True:
    screen = grab_screen([10, 40, 770, 700])
    #screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
    roi = screen[300:, 100:]
    roi = cv2.resize(roi, (80, 60))
    cv2.imshow('frame', screen)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()