Code Example #1
def main(file_name, starting_value):
    training_data = []
    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)

    last_time = time.time()
    paused = False
    print('STARTING!!!')
    while (True):

        if not paused:
            screen = grab_screen(region=(0, 40, GAME_WIDTH, GAME_HEIGHT + 40))
            last_time = time.time()
            # resize to something a bit more acceptable for a CNN
            screen = cv2.resize(screen, (80, 60))
            # run a color convert:
            screen = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)

            keys = key_check()
            output = keys_to_output(keys)
            training_data.append([screen, output])

            # print('loop took {} seconds'.format(time.time()-last_time))
            last_time = time.time()
            ##            cv2.imshow('window',cv2.resize(screen,(640,360)))
            ##            if cv2.waitKey(25) & 0xFF == ord('q'):
            ##                cv2.destroyAllWindows()
            ##                break

            if len(training_data) % 100 == 0:
                print(len(training_data))

                if len(training_data) == 500:
                    np.save(file_name, training_data)
                    print('SAVED')
                    training_data = []
                    starting_value += 1
                    file_name = 'C:/Users/stepanovep/PycharmProjects/PyGta5/phase7-larger-color/training_data-{}.npy'.format(
                        starting_value)

        keys = key_check()
        if 'T' in keys:
            if paused:
                paused = False
                print('unpaused!')
                time.sleep(1)
            else:
                print('Pausing!')
                paused = True
                time.sleep(1)
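All of the snippets on this page lean on small project-specific helpers such as grab_screen(), key_check() and keys_to_output() that are defined elsewhere in each repository. As a rough sketch only (the real key sets and encodings differ between projects), a pywin32-based key_check() and a three-class one-hot keys_to_output() could look like this:

# Hedged sketch of the helpers the examples assume; not the original implementations.
import win32api  # pywin32

CHECK_KEYS = ['W', 'A', 'S', 'D', 'T', 'Q', 'P']

def key_check():
    # Return the subset of CHECK_KEYS that is currently held down.
    return [k for k in CHECK_KEYS if win32api.GetAsyncKeyState(ord(k)) & 0x8000]

def keys_to_output(keys):
    # One-hot steering label in [A, W, D] order; default to straight.
    if 'A' in keys:
        return [1, 0, 0]
    if 'D' in keys:
        return [0, 0, 1]
    return [0, 1, 0]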
Code Example #2
def main(file_name, starting_value):
    training_data = []
    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)

    last_time = time.time()
    paused = False
    print('STARTING!!!')
    while (True):
        time.sleep(0.1)
        if not paused:
            screen = grab_screen(region=(100, 100, 1350, 1030))
            last_time = time.time()
            # resize to something a bit more acceptable for a CNN
            screen = cv2.resize(screen, (480, 270))
            # run a color convert:
            screen = cv2.cvtColor(screen, cv2.COLOR_BGR2RGB)

            keys = key_check()
            output = keys_to_output(keys)
            training_data.append([screen, output])

            # print('loop took {} seconds'.format(time.time()-last_time))
            last_time = time.time()
            # cv2.imshow('window',cv2.resize(screen,(640,360)))
            # if cv2.waitKey(25) & 0xFF == ord('q'):
            #    cv2.destroyAllWindows()
            #    break

            if len(training_data) % 100 == 0:
                print(len(training_data))

                if len(training_data) == 500:
                    np.save(file_name, training_data)
                    print('SAVED')
                    training_data = []
                    starting_value += 1
                    file_name = 'C:/Users/ADR/Documents/Most_Wanted_Self_Driving_Car/TrainingData/training_data-{}.npy'.format(
                        starting_value)

        keys = key_check()
        if 'T' in keys:
            if paused:
                paused = False
                print('unpaused!')
                time.sleep(1)
            else:
                print('Pausing!')
                paused = True
                time.sleep(1)
Code Example #3
def main():
    file_name = "training_data.npy"
    if os.path.isfile(file_name):
        training_data = list(np.load(file_name))
    else:
        training_data = []


    for i in list(range(4))[::-1]:
        print(i+1)
        time.sleep(1)

    lst = time.time()
    while(True):
        screen = np.array(sct.grab((0,40,800,640)))
        screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
        screen = cv2.resize(screen, (80,60))
        keys = key_check()
        output = keys_to_output(keys)
        training_data.append([screen, output])
        # print("Loop took {}".format((time.time()-lst)))
        lst = time.time()

        if len(training_data) % 500 == 0:
            print("Saving data")
            np.save(file_name, training_data)
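Each frame is stored as an [image, one-hot] pair, so np.save() writes an object array; reading it back for training needs allow_pickle=True on current NumPy. A minimal sketch for the file written above (shapes depend on the capture settings):

import numpy as np

# Load the collected [frame, label] pairs back for training.
training_data = np.load('training_data.npy', allow_pickle=True)
images = np.array([frame for frame, _ in training_data])   # e.g. (N, 60, 80) grayscale
labels = np.array([label for _, label in training_data])   # e.g. (N, 3) one-hot
print(images.shape, labels.shape)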
Code Example #4
def main():
    # countdown from 5 before capture starts
    for i in range(6)[::-1]:
        print('Starting in {}'.format(i))
        time.sleep(1)

    initial_time = time.time()

    # main loop
    while True:
        # grabbing the screen
        screen = grab_screen([10, 40, 770, 700])
        # converting to gray
        screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
        # getting the region of interest
        roi = screen[300:, 100:]
        # resizing the region of interest to (480, 270)
        roi = cv2.resize(roi, (480, 270))
        # checking the keystroke
        key = key_check()
        # getting the one hot encoding of the key
        key = oneHotEncoding(key)
        # appending the frame with the corresponding keystroke
        training_data.append([roi, key])

        print('Frame time: {} s'.format(time.time() - initial_time))
        initial_time = time.time()

        # save the data every 200 frames
        if len(training_data) % 200 == 0:
            print('Length is {}, now saving the data'.format(
                len(training_data)))
            np.save(trainFile, training_data)
Code Example #5
def main():

    A_counter = 10674
    W_counter = 21641
    D_counter = 14518

    for i in range(5, 0, -1):
        print(i)
        time.sleep(1)

    while True:
        game_frame = grab_screen()
        game_frame = cv2.cvtColor(game_frame, cv2.COLOR_BGR2GRAY)
        key_pressed = key_check()

        if key_pressed == 'A':
            cv2.imwrite(DATADIR + "A/" + str(A_counter) + ".jpg", game_frame)
            A_counter = A_counter + 1
        elif key_pressed == 'W':
            cv2.imwrite(DATADIR + "W/" + str(W_counter) + ".jpg", game_frame)
            W_counter = W_counter + 1
        elif key_pressed == 'D':
            cv2.imwrite(DATADIR + "D/" + str(D_counter) + ".jpg", game_frame)
            D_counter = D_counter + 1
        else:
            pass

        cv2.imshow('game_frame', game_frame)
        if cv2.waitKey(1) == 27:
            break

    cv2.destroyAllWindows()
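One caveat with this per-key layout: cv2.imwrite() typically returns False instead of raising when the target folder is missing, so a run like the one above can silently save nothing. A small guard worth running once before the loop (DATADIR and the class folders are taken from the example; treat the layout as an assumption):

import os

# Create the per-key class folders up front so cv2.imwrite cannot fail silently.
for key_dir in ('A', 'W', 'D'):
    os.makedirs(os.path.join(DATADIR, key_dir), exist_ok=True)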
Code Example #6
def screen_record():
    lc = 0
    rc = 0
    sc = 0

    count = 0

    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)

        # 800x600 windowed mode

    while (True):
        last_time = time.time()
        screen = grab_screen(region=(0, 400, 800, 600))
        #screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
        screen = cv2.resize(screen, (128, 128))
        keys = key_check()
        keystrokes = presskeys_to_keystrokes(keys)
        #print("Frames capturing at {} fps".format(1.0 / time.time()-last_time))
        last_time = time.time()
        path = "C:/Users/ajeya/PycharmProjects/Self Driving Car/data"
        name = 'image' + str(count) + '.png'
        if 'A' in keys:
            lc += 1
            name = 'image' + str(lc) + '.png'
            print(keys)
            path = path + "/left"
            cv2.imwrite(os.path.join(path, name), screen)
        elif 'W' in keys:
            sc += 1
            name = 'image' + str(sc) + '.png'
            print(keys)
            path = path + "/straight"
            cv2.imwrite(os.path.join(path, name), screen)
        elif 'D' in keys:
            rc += 1
            name = 'image' + str(rc) + '.png'
            print(keys)
            path = path + "/right"
            cv2.imwrite(os.path.join(path, name), screen)
        else:
            print(keys)
            path = path + "/slow"
            pass
            #cv2.imwrite(os.path.join(path, name), screen)

        count += 1
        #new_img = cv2.resize(screen,(500,400))
        #backtorgb = cv2.cvtColor(new_screen, cv2.COLOR_GRAY2RGB)

        #cv2.imshow('Window', cv2.cvtColor(screen, cv2.COLOR_BGR2RGB))
        cv2.imshow('window', screen)
        #cv2.imwrite(os.path.join(path,name), new_screen)

        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
Code Example #7
def main():
    last_time = time.time()
    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)

    paused = False
    while (True):

        if not paused:
            # 800x600 windowed mode
            screen = grab_screen(region=(0, 40, 800, 640))
            print('loop took {} seconds'.format(time.time() - last_time))
            last_time = time.time()
            screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
            screen = cv2.resize(screen, (160, 120))

            prediction = model.predict([screen.reshape(160, 120, 1)])[0]
            print(prediction)

            if np.argmax(prediction) == np.argmax(w):
                straight()
            elif np.argmax(prediction) == np.argmax(s):
                reverse()
            elif np.argmax(prediction) == np.argmax(a):
                left()
            elif np.argmax(prediction) == np.argmax(d):
                right()
            elif np.argmax(prediction) == np.argmax(wa):
                forward_left()
            elif np.argmax(prediction) == np.argmax(wd):
                forward_right()
            elif np.argmax(prediction) == np.argmax(sa):
                reverse_left()
            elif np.argmax(prediction) == np.argmax(sd):
                reverse_right()
            elif np.argmax(prediction) == np.argmax(nk):
                no_keys()

        keys = key_check()

        # p pauses game and can get annoying.
        if 'T' in keys:
            if paused:
                paused = False
                time.sleep(1)
            else:
                paused = True
                ReleaseKey(A)
                ReleaseKey(W)
                ReleaseKey(D)
                time.sleep(1)
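The comparison targets w, s, a, d, wa, wd, sa, sd and nk above are the nine one-hot class vectors the model was trained on; they are not part of this excerpt. A plausible definition, assuming the common 9-class layout used in these GTA-style projects (the ordering is an assumption, not taken from the source):

# Assumed 9-class one-hot vectors matching the model's output ordering.
w  = [1, 0, 0, 0, 0, 0, 0, 0, 0]
s  = [0, 1, 0, 0, 0, 0, 0, 0, 0]
a  = [0, 0, 1, 0, 0, 0, 0, 0, 0]
d  = [0, 0, 0, 1, 0, 0, 0, 0, 0]
wa = [0, 0, 0, 0, 1, 0, 0, 0, 0]
wd = [0, 0, 0, 0, 0, 1, 0, 0, 0]
sa = [0, 0, 0, 0, 0, 0, 1, 0, 0]
sd = [0, 0, 0, 0, 0, 0, 0, 1, 0]
nk = [0, 0, 0, 0, 0, 0, 0, 0, 1]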
Code Example #8
File: marioPython.py  Project: beerjamin/projects
def main():
    for i in list(range(4))[::-1]:
        print(i+1)
        time.sleep(1)
    last_time = time.time()

    while True:
        screen = grab_screen(region=(0,40,800,640))
        screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
        screen = cv2.resize(screen,(80,60))
        keys = key_check()
        output = keys_to_output(keys)
        training_data.append([screen,output])
        print('Frame took {} seconds'.format(time.time()-last_time))
        last_time = time.time()

        #cv2.imshow('window2',cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB))

        if len(training_data) % 500 == 0:
            print(len(training_data))
            np.save(file_name,training_data)
Code Example #9
	def streamingAndCollectData(self):
		saved_frame = 0
		total_frame = 0
		print('Start collecting images...')
		e1 = cv2.getTickCount()

		try:
			print("Connection from: ", self.client_address)
			print("Streaming...")
			stream_bytes = b''
			frame = 1
			while True:
				stream_bytes += self.connection.read(1024)
				first = stream_bytes.find(b'\xff\xd8')
				last = stream_bytes.find(b'\xff\xd9')
				self.conn.sendall(b'WA')
				if first != -1 and last != -1:
					jpg = stream_bytes[first:last + 2]
					stream_bytes = stream_bytes[last + 2:]
					image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
					image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
					height, width = image.shape
					# select lower half of the image
					roi = image[int(height/2):height, :]

					# Predict the data based on image frame received.
					prediction = self.predict(roi)

					# Control based on prediction
					self.navigate(prediction)
	
					# Press Q to quit running the car.
					keys = key_check()
					if 'Q' in keys:
						payload = dict(data='x')
						response = requests.post(self.restUrl, params=payload)
						print(response, payload, 'sent to server.')
						exit(0)

		finally:
Code Example #10
def main(file_name, set_count):
    count = 0
    start_collecting = False
    while (True):
        #start = time.time()
        screen = cv2.cvtColor(grab_screen(region=(0, 100, 800, 540)),
                              cv2.COLOR_BGR2GRAY)
        mini_screen = cv2.resize(screen, (160, 108))

        cv2.imshow('screen', screen)

        if (cv2.waitKey(25) & 0xFF == ord('q')):
            cv2.destroyAllWindows()
            break

        keys = key_check()
        label = keys_to_array(keys)

        if ('P' in keys):
            start_collecting = not (start_collecting)
            if (start_collecting):
                print("unpaused")
            else:
                print("paused")

        if (start_collecting and label is not None):
            train_data.append([mini_screen, label])
            count += 1

        if (count % 500 == 0):
            print("have collected {} data points".format(str(count)))
        # save after every 20000 data points
        if (count % 20000 == 0):
            print("have collected {} data points".format(str(count)))
            np.save(file_name, train_data)
            if (count == 40000):
                set_count += 1
                file_name = "training_data" + str(set_count) + ".npy"
Code Example #11
def screen_record():


    count = 0

    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)

        # 800x600 windowed mode

    while(True):
        last_time = time.time()
        screen = grab_screen(region=(0, 40, 800, 600))
        screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
        screen = cv2.resize(screen, (128, 128))
        keys = key_check()
        keystrokes = presskeys_to_keystrokes(keys)
        training_data.append([screen, keystrokes])
        #print("Frames capturing at {} fps".format(1.0 / time.time()-last_time))
        last_time = time.time()
        path = "C:/Users/ajeya/PycharmProjects/Self Driving Car/images"
        name = 'image'+str(count)+'.png'
        count += 1
        #new_img = cv2.resize(screen,(500,400))
        #backtorgb = cv2.cvtColor(new_screen, cv2.COLOR_GRAY2RGB)

        #cv2.imshow('Window', cv2.cvtColor(screen, cv2.COLOR_BGR2RGB))
        cv2.imshow('window', screen)
        #cv2.imwrite(os.path.join(path,name), new_screen)
        if len(training_data) % 400 == 0:
            print(len(training_data))
            np.save(data_file, training_data)


        if cv2.waitKey(25) & 0xFF == ord('q'):
            cv2.destroyAllWindows()
            break
Code Example #12
    crashing the vjoy driver  
"""

vj = vJoy()

x_range = 16393
z_range = 32786

wAxisX = 16393
wAxisY = 16393
wAxisZ = 0
wAxisXRot = 16393
wAxisYRot = 16393
wAxisZRot = 0

keys = key_check()
ultimate_release()

while True:
    vj.open()
    btn = 1
    joystickPosition = vj.generateJoystickPosition(wAxisX=12000, wAxisZ=32000, wAxisZRot=0)
    vj.update(joystickPosition)
    print("running")
    time.sleep(0.1)
    vj.sendButtons(0)

    keys = key_check()
    if 'P' in keys:
        joystickPosition = vj.generateJoystickPosition(wAxisX=16000, wAxisY=16000)
        vj.update(joystickPosition)
Code Example #13
    def streamingAndCollectData(self):
        saved_frame = 0
        total_frame = 0

        # collect images for training
        print('Start collecting images...')
        e1 = cv2.getTickCount()
        image_array = np.zeros((1, 38400))
        label_array = np.zeros((1, 4), 'float')

        try:
            print("Connection from: ", self.client_address)
            print("Streaming Pi Camera...")
            print("Press 'Q' to exit")

            stream_bytes = b''
            frame = 1
            while True:
                stream_bytes += self.connection.read(1024)
                first = stream_bytes.find(b'\xff\xd8')
                last = stream_bytes.find(b'\xff\xd9')
                self.conn.sendall(b'WA')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last + 2]
                    stream_bytes = stream_bytes[last + 2:]
                    image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
                    imageBW = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                    height, width = imageBW.shape

                    # select lower half of the image
                    roi = imageBW[int(height/2):height, :]

                    # Show Image
                    cv2.imshow('image', image)
                                        
                    temp_array = roi.flatten().astype(np.float32)
                    frame += 1
                    total_frame += 1
                    
                    # Listen to pygame event
                    for event in pygame.event.get():
                        keystate = pygame.key.get_pressed()
                        if event.type == pygame.QUIT:
                            print("Quit")
                            break
                        if event.type == pygame.KEYDOWN:
                            print("Start Controlling...")
                            # Check combined keys first so a single-key branch
                            # does not shadow the UP+LEFT / UP+RIGHT cases.
                            if keystate[pygame.K_UP] and keystate[pygame.K_RIGHT]:
                                print("Forward Right")
                                cv2.imwrite('training_images/Imageframe{:>05}.jpg'.format(frame), roi)
                                image_array = np.vstack((image_array, temp_array))
                                label_array = np.vstack((label_array, self.k[1]))
                                saved_frame += 1

                                payload = dict(data='q')
                                response = requests.post(self.restUrl, params=payload)
                                print(response, payload, 'sent to server.')

                            elif keystate[pygame.K_UP] and keystate[pygame.K_LEFT]:
                                print("Forward Left")
                                cv2.imwrite('training_images/Imageframe{:>05}.jpg'.format(frame), roi)
                                image_array = np.vstack((image_array, temp_array))
                                label_array = np.vstack((label_array, self.k[0]))
                                saved_frame += 1

                                payload = dict(data='e')
                                response = requests.post(self.restUrl, params=payload)
                                print(response, payload, 'sent to server.')

                            elif keystate[pygame.K_UP]:
                                print("Forward")
                                cv2.imwrite('training_images/Imageframe{:>05}.jpg'.format(frame), roi)
                                image_array = np.vstack((image_array, temp_array))
                                label_array = np.vstack((label_array, self.k[2]))
                                saved_frame += 1

                                payload = dict(data='s')
                                response = requests.post(self.restUrl, params=payload)
                                print(response, payload, 'sent to server.')

                            elif keystate[pygame.K_DOWN]:
                                print("Reverse")
                                cv2.imwrite('training_images/Imageframe{:>05}.jpg'.format(frame), roi)
                                image_array = np.vstack((image_array, temp_array))
                                label_array = np.vstack((label_array, self.k[3]))
                                saved_frame += 1

                                payload = dict(data='w')
                                response = requests.post(self.restUrl, params=payload)
                                print(response, payload, 'sent to server.')

                            elif keystate[pygame.K_LEFT]:
                                print("Left")
                                cv2.imwrite('training_images/Imageframe{:>05}.jpg'.format(frame), roi)
                                image_array = np.vstack((image_array, temp_array))
                                label_array = np.vstack((label_array, self.k[0]))
                                saved_frame += 1

                                payload = dict(data='a')
                                response = requests.post(self.restUrl, params=payload)
                                print(response, payload, 'sent to server.')

                            elif keystate[pygame.K_RIGHT]:
                                print("Right")
                                cv2.imwrite('training_images/Imageframe{:>05}.jpg'.format(frame), roi)
                                image_array = np.vstack((image_array, temp_array))
                                label_array = np.vstack((label_array, self.k[1]))
                                saved_frame += 1

                                payload = dict(data='d')
                                response = requests.post(self.restUrl, params=payload)
                                print(response, payload, 'sent to server.')

                        if event.type == pygame.KEYUP:
                            print("release")
                            payload = dict(data='x')
                            response = requests.post(self.restUrl, params=payload)
                            print(response, payload, 'sent to server.')
            
                pygame.display.update()
                keys = key_check()
                if 'Q' in keys:
                        print('exit')
                        self.send_inst = False
                        break
            # save training images and labels
            train = image_array[1:, :]
            train_labels = label_array[1:, :]

            # save training data as a numpy file
            file_name = str(int(time.time()))
            directory = "training_data"
            if not os.path.exists(directory):
                os.makedirs(directory)
            try:    
                np.savez(directory + '/' + file_name + '.npz', train=train, train_labels=train_labels)
            except IOError as e:
                print(e)

            e2 = cv2.getTickCount()
            
            # calculate streaming metrics
            time0 = (e2 - e1) / cv2.getTickFrequency()
            print('Streaming duration:', time0)

            print(train.shape)
            print(train_labels.shape)
            print('Total frame:', total_frame)
            print('Saved frame:', saved_frame)
            print('Dropped frame', total_frame - saved_frame)
            
        finally:
            self.connection.close()
            self.server_socket.close()
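The .npz written at the end bundles the flattened half-frames and labels under the keys train and train_labels. Reading one or more collected sessions back is straightforward; a minimal sketch against the training_data/ directory used above (assuming at least one session file exists):

import glob
import numpy as np

# Concatenate every collected session in training_data/ into one training set.
X_parts, y_parts = [], []
for path in sorted(glob.glob('training_data/*.npz')):
    data = np.load(path)
    X_parts.append(data['train'])          # (n_i, 38400) flattened lower half-frames
    y_parts.append(data['train_labels'])   # (n_i, 4) one-hot labels
X = np.concatenate(X_parts)
y = np.concatenate(y_parts)
print(X.shape, y.shape)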
Code Example #14
    def streamingAndCollectData(self):
        saved_frame = 0
        total_frame = 0

        # collect images for training
        print('Start collecting images...')
        e1 = cv2.getTickCount()
        image_array = np.zeros((1, 38400))
        label_array = np.zeros((1, 4), 'float')

        try:
            print("Connection from: ", self.client_address)
            print("Streaming...")
            print("Press 'Q' to exit")

            stream_bytes = b''
            frame = 1
            while True:
                stream_bytes += self.connection.read(1024)
                first = stream_bytes.find(b'\xff\xd8')
                last = stream_bytes.find(b'\xff\xd9')
                self.conn.sendall(b'WA')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last + 2]
                    stream_bytes = stream_bytes[last + 2:]
                    image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8),
                                         cv2.IMREAD_UNCHANGED)
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                    height, width = image.shape
                    # select lower half of the image
                    roi = image[int(height / 2):height, :]
                    # cv2.imshow('image', roi)

                    # save streamed images
                    cv2.imwrite(
                        'training_images/Imageframe{:>05}.jpg'.format(frame),
                        roi)
                    # reshape the roi image into one row array
                    # temp_array = roi.reshape(1, 38400).astype(np.float32)
                    temp_array = roi.flatten().astype(np.float32)
                    frame += 1
                    total_frame += 1

                    # Check key stroke while streaming
                    keys = key_check()

                    if 'W' in keys and 'A' in keys:
                        print("Forward Left")
                        image_array = np.vstack((image_array, temp_array))
                        # Remember to check label_array later
                        label_array = np.vstack((label_array, self.k[0]))
                        saved_frame += 1

                        # Send key to rest api
                        payload = dict(data='WA')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')

                    elif 'W' in keys and 'D' in keys:
                        print("Forward Right")
                        image_array = np.vstack((image_array, temp_array))
                        label_array = np.vstack((label_array, self.k[1]))
                        saved_frame += 1

                        # Send key to rest api
                        payload = dict(data='WD')
                        headers = {'content-type': 'application/json'}
                        response = requests.post(self.restUrl,
                                                 data=json.dumps(payload),
                                                 headers=headers)
                        print(response, payload, 'sent to server.')

                    elif 'S' in keys and 'A' in keys:
                        print("Reverse Left")

                        # Send key to rest api
                        payload = dict(data='SA')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')

                    elif 'S' in keys and 'D' in keys:
                        print("Reverse Right")

                        # Send key to rest api
                        payload = dict(data='SD')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')

                    elif 'W' in keys:
                        print("Forward")
                        saved_frame += 1
                        image_array = np.vstack((image_array, temp_array))
                        label_array = np.vstack((label_array, self.k[2]))

                        # Send key to rest api
                        payload = dict(data='W')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')

                    elif 'S' in keys:
                        print("Reverse")
                        saved_frame += 1
                        image_array = np.vstack((image_array, temp_array))
                        label_array = np.vstack((label_array, self.k[3]))

                        # Send key to rest api
                        payload = dict(data='S')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')

                    elif 'D' in keys:
                        print("Right")
                        saved_frame += 1
                        image_array = np.vstack((image_array, temp_array))
                        label_array = np.vstack((label_array, self.k[1]))

                        # Send key to rest api
                        payload = dict(data='D')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')

                    elif 'A' in keys:
                        print("Left")
                        saved_frame += 1
                        image_array = np.vstack((image_array, temp_array))
                        label_array = np.vstack((label_array, self.k[0]))

                        # Send key to rest api
                        payload = dict(data='A')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')

                    elif 'Q' in keys:
                        print('exit')
                        self.send_inst = False

                        # Send key to rest api
                        payload = dict(data='Q')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')

                        break

            # save training images and labels
            train = image_array[1:, :]
            train_labels = label_array[1:, :]

            # save training data as a numpy file
            np.savez('training_data_temp/test00001.npz',
                     train=train,
                     train_labels=train_labels)

            e2 = cv2.getTickCount()
            # calculate streaming duration
            time0 = (e2 - e1) / cv2.getTickFrequency()
            print('Streaming duration:', time0)

            print(train.shape)
            print(train_labels.shape)
            print('Total frame:', total_frame)
            print('Saved frame:', saved_frame)
            print('Dropped frame', total_frame - saved_frame)

        finally:
            self.connection.close()
            self.server_socket.close()
Code Example #15
def screen_record():
    paused = True
    starting_value = 0
    training_data = []

    while True:
        file_name = 'training_data/training_data-{}.npy'.format(starting_value)

        if os.path.isfile(file_name):
            print('File exists.', starting_value)
            starting_value += 1
        else:
            print('File does not exist.', starting_value)

            break

    while (True):
        if not paused:

            # start = time.time()

            with mss.mss() as sct:

                mon = sct.monitors[1]

                monitor = {
                    "top": mon["top"] + 405,
                    "left": mon["left"] + 720,
                    "width": 480,
                    "height": 270,
                    "mon": 1,
                }

                screen = np.array(sct.grab(monitor))

            keys = key_check()

            get_key_output(keys)
            get_mouse_output()

            print(output)

            training_data.append(
                [screen,
                 str(output),
                 getattr(CSserver, 'get_reward')()])

            reset_output()
            CSserver.reset_reward()

            # print(time.time() - start)

            if len(training_data) % 500 == 0:
                np.save(file_name, training_data)
                print('SAVED', starting_value)
                training_data = []
                starting_value += 1
                file_name = 'training_data/training_data-{}.npy'.format(
                    starting_value)

        keys = key_check()
        if 'T' in keys:
            if paused:
                paused = False
                print('unpaused!')
                time.sleep(1)
            else:
                print('Pausing!')
                paused = True
                time.sleep(1)
Code Example #16
File: testing.py  Project: itsDarMan96/SelfDrivingCar
def screen_record():
    last_time = time.time()
    count = 0

    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)

        # 800x600 windowed mode
    stopped = False
    while (True):

        if not stopped:

            screen = grab_screen(region=(0, 400, 800, 600))
            screen = cv2.cvtColor(screen, cv2.COLOR_BGR2GRAY)
            screen = cv2.resize(screen, (128, 128))

            pred = trained_model.predict([screen.reshape(1, 128, 128, 1)])[0]
            moves = list(np.around(pred))
            #            moves = np.array(pred)*np.array([4.5, 0.1, 0.1])
            #            moves = np.argmax(moves)
            print("Move {0} with prediction {1}".format(moves, pred))

            turn_prob = 0.75
            no_turn_prob = 0.70
            #            if pred[1] > no_turn_prob:
            #                go_straight()
            #            elif pred[0] > turn_prob:
            #                go_left()
            #            elif pred[2] > turn_prob:
            #                go_right()
            #            else:
            #                go_straight()
            #            if moves == 0:
            #                go_straight()
            #                choice_picked = 'straight'
            #
            #            elif moves == 1:
            #                go_reverse()
            #                choice_picked = 'reverse'
            #            elif moves == 2:
            #                go_left()
            #                choice_picked = 'left'
            #            elif moves == 3:
            #                go_right()
            #                choice_picked = 'right'
            #            elif moves == 4:
            #                go_forward_left()
            #                choice_picked = 'forward+left'
            #            elif moves == 5:
            #                go_forward_right()
            #                choice_picked = 'forward+right'
            #            elif moves == 6:
            #                go_reverse_left()
            #                choice_picked = 'reverse+left'
            #            elif moves == 7:
            #                go_reverse_right()
            #                choice_picked = 'reverse+right'
            #            elif moves == 8:
            #                no_keys()
            #                choice_picked = 'nokeys'

            if moves == [1, 0, 0]:
                go_left()
            elif moves == [0, 1, 0]:
                go_straight()
            elif moves == [0, 0, 1]:
                go_right()
            else:
                go_straight()
                pass
        # Check the toggle key outside the "if not stopped" block,
        # otherwise the loop can never be un-paused once stopped.
        key = key_check()

        if 'T' in key:
            if stopped:
                stopped = False
                time.sleep(1)
            else:
                stopped = True
                ReleaseKey(A)
                ReleaseKey(W)
                ReleaseKey(D)
        #print("Frames capturing at {} fps".format(time.time()-last_time))
        last_time = time.time()
Code Example #17
    def streamingAndCollectData(self):
        saved_frame = 0
        total_frame = 0

        # collect images for training
        print('Start collecting images...')
        e1 = cv2.getTickCount()

        file_name = self.file_name
        starting_value = self.starting_value
        training_data = []
        for i in list(range(4))[::-1]:
            print(i+1)
            time.sleep(1)

        last_time = time.time()
        paused = False
        print('STARTING!!!')

        try:
            print("Connection from: ", self.client_address)
            print("Streaming...")
            print("Press 'Q' to exit")

            stream_bytes = b''
            frame = 1
            while True:
                stream_bytes += self.connection.read(1024)
                first = stream_bytes.find(b'\xff\xd8')
                last = stream_bytes.find(b'\xff\xd9')
                self.conn.sendall(b'WA')
                if first != -1 and last != -1:
                    jpg = stream_bytes[first:last + 2]
                    stream_bytes = stream_bytes[last + 2:]
                    image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_UNCHANGED)

                    # select lower half of the image
                    roi = image[120:240, :]

                    last_time = time.time()
                    # run a color convert:
                    screen = cv2.cvtColor(roi, cv2.COLOR_BGR2RGB)
                    cv2.imshow('image', screen)

                    cv2.imwrite('training_images/Imageframe{:>05}.jpg'.format(frame), screen)

                    frame += 1
                    total_frame += 1

                    # Check key stroke while streaming
                    keys = key_check()

                    if 'W' in keys and 'A' in keys:
                        print("Forward Left")
                        training_data.append([screen,self.wa])
                        saved_frame += 1

                        # Send key to rest api
                        payload = dict(data='WA')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')

                    elif 'W' in keys and 'D' in keys:
                        print("Forward Right")
                        training_data.append([screen,self.wd])
                        saved_frame += 1

                        # Send key to rest api
                        payload = dict(data='WD')
                        headers = { 'content-type': 'application/json' }
                        response = requests.post(self.restUrl, data=json.dumps(payload), headers=headers )
                        print(response, payload, 'sent to server.')
                        

                    elif 'S' in keys and 'A' in keys:
                        print("Reverse Left")
                        training_data.append([screen,self.sa])
                        # Send key to rest api
                        payload = dict(data='SA')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')
                        

                    elif 'S' in keys and 'D' in keys:
                        print("Reverse Right")
                        training_data.append([screen,self.sd])
                        # Send key to rest api
                        payload = dict(data='SD')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')
                        

                    elif 'W' in keys:
                        print("Forward")
                        saved_frame += 1
                        training_data.append([screen,self.w])

                        # Send key to rest api
                        payload = dict(data='W')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')

                    elif 'S' in keys:
                        print("Reverse")
                        saved_frame += 1
                        training_data.append([screen,self.s])
                        
                        # Send key to rest api
                        payload = dict(data='S')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')

                    elif 'D' in keys:
                        print("Right")
                        saved_frame += 1
                        training_data.append([screen,self.d])
                        
                        # Send key to rest api
                        payload = dict(data='D')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')


                    elif 'A' in keys:
                        print("Left")
                        saved_frame += 1
                        training_data.append([screen,self.a])
                        
                        # Send key to rest api
                        payload = dict(data='A')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')

                    elif 'Q' in keys:
                        print('exit')
                        training_data.append([screen,self.nk])
                        self.send_inst = False
                        # Send key to rest api
                        payload = dict(data='Q')
                        response = requests.post(self.restUrl, data=payload)
                        print(response, payload, 'sent to server.')

                        break


            np.save(file_name,training_data)

            e2 = cv2.getTickCount()
            # calculate streaming duration
            time0 = (e2 - e1) / cv2.getTickFrequency()
            print('Streaming duration:', time0)
            print('Total frame:', total_frame)
            print('Saved frame:', saved_frame)
            print('Dropped frame', total_frame - saved_frame)
            
        finally:
            self.connection.close()
            self.server_socket.close()