Example #1
    # Assumes module-level imports: cv2, grabscreen, helper
    def run(self):
        """ Method that runs forever """
        while True:
            # If the exit is set, quit the process
            if self.exit.is_set():
                cv2.destroyAllWindows()
                break

            # Do something
            if self.done:

                # Prevent another grab from happening until this one is finished
                self.done = False

                # Grab the pixels
                im = grabscreen.grab_screen(region=self.bbox)

                # Resize if allowed
                if self.resize:
                    im = cv2.resize(im, self.resize_size)

                # Render if allowed
                if self.render:
                    helper.render_image(im, name="Render")

                # Add the image to the memory
                self.screen_mem.append(im)

                # Allow the next grab to happen
                self.done = True
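
Examples #1, #2, #4, and #5 all call helper.render_image with just an image (and optionally a window name), but the helper module itself is not shown on this page. Below is a minimal sketch of what such a helper could look like, assuming it simply wraps OpenCV's imshow; the signature is inferred from the calls above and the body is an assumption, not the actual helper source. (Example #3 uses a different helper.render_image with a different signature.)

import cv2

def render_image(image, name="Render"):
    # Hypothetical stand-in for helper.render_image: display the frame in a
    # named window and pump the GUI event loop for 1 ms so it repaints.
    cv2.imshow(name, image)
    cv2.waitKey(1)

The cv2.destroyAllWindows() call in the exit branch of Example #1 fits this pattern: imshow windows persist until they are explicitly destroyed.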
Example #2
    # Assumes module-level imports: base64, numpy as np, io.BytesIO, PIL.Image, helper
    def get_screen(self, d):
        element = d.find_element_by_class_name("runner-canvas")
        # Strip the 'data:image/png;base64,' data-URL prefix (22 characters)
        img = d.execute_script(
            "return arguments[0].toDataURL('image/png').substring(22)", element)
        img = base64.b64decode(img)
        img = Image.open(BytesIO(img)).convert('L')  # Convert it to grayscale
        # img = Image.open(BytesIO(img))  # Without converting
        img = img.crop((0, 0, 300, 150))
        img = np.asarray(img)

        # Render the image if enabled:
        if self.render:
            helper.render_image(img)

        return img
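
A sketch of how this method might be driven, assuming the runner-canvas class name refers to Chrome's built-in T-Rex runner at chrome://dino; the driver setup below is an assumption, and the instance name grabber is hypothetical. It uses the same Selenium 3 API (find_element_by_class_name) that the method above relies on.

from selenium import webdriver
from selenium.common.exceptions import WebDriverException

driver = webdriver.Chrome()
try:
    driver.get('chrome://dino')
except WebDriverException:
    pass  # some driver versions raise here even though the page renders

frame = grabber.get_screen(driver)  # grabber: hypothetical instance of the class above
print(frame.shape)                  # (150, 300) after the 300x150 crop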
Example #3
import os

import tensorflow as tf
from moviepy.editor import VideoFileClip, ImageSequenceClip

import helper
import tests

# load_vgg, layers, optimize, and train_nn are assumed to be defined in this module.


def run():
    num_classes = 2
    image_shape = (160, 576)
    data_dir = './data'
    runs_dir = './runs'
    tests.test_for_kitti_dataset(data_dir)

    # Download pretrained vgg model
    helper.maybe_download_pretrained_vgg(data_dir)

    # Hyperparameters
    epochs = 30
    batch_size = 5

    # 20,20,stddev=0.01 - Epoch: 20/20... Iteration: 290 Training loss: 0.09508
    # 20,10,stddev=0.01 - Epoch: 20/20... Iteration: 570 Training loss: 0.07547
    # 40,5,stddev=0.01 - Epoch: 40/40... Iteration: 2310 Training loss: 0.02939
    # 40,5,stddev=0.001 - Epoch: 40/40... Iteration: 2310 Training loss: 0.03463
    # 20,5,stddev=0.01 - Epoch: 20/20... Iteration: 1150 Training loss: 0.06885

    # OPTIONAL: Train and run inference on the Cityscapes dataset instead of the KITTI dataset.
    # You'll need a GPU with at least 10 teraFLOPS to train on it.
    #  https://www.cityscapes-dataset.com/

    with tf.Session() as sess:
        # Path to vgg model
        vgg_path = os.path.join(data_dir, 'vgg')
        # Create function to get batches
        get_batches_fn = helper.gen_batch_function(
            os.path.join(data_dir, 'data_road/training'), image_shape)

        # OPTIONAL: Augment Images for better results
        #  https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network

        correct_label = tf.placeholder(tf.int32,
                                       [None, None, None, num_classes],
                                       name='correct_label')
        learning_rate = tf.placeholder(tf.float32, name='learning_rate')

        # Build NN using load_vgg, layers, and optimize function
        image_input, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(
            sess, vgg_path)
        layer_output = layers(layer3_out, layer4_out, layer7_out, num_classes)

        saver = tf.train.Saver()

        # Train NN using the train_nn function
        logits, train_op, cross_entropy_loss = optimize(
            layer_output, correct_label, learning_rate, num_classes)
        train_nn(sess, epochs, batch_size, get_batches_fn, train_op,
                 cross_entropy_loss, image_input, correct_label, keep_prob,
                 learning_rate)

        # Save inference data using helper.save_inference_samples
        helper.save_inference_samples(runs_dir, data_dir, sess, image_shape,
                                      logits, keep_prob, image_input)

        saver.save(sess, './runs/model.ckpt')

        # OPTIONAL: Apply the trained model to a video
        clip = VideoFileClip("challenge_video.mp4")
        new_frames = [
            helper.render_image(sess, logits, keep_prob, image_input, frame,
                                image_shape) for frame in clip.iter_frames()
        ]
        new_clip = ImageSequenceClip(new_frames, fps=clip.fps)
        new_clip.write_videofile("new_file.mp4")
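
The optimize function called above is not shown. For this kind of TF1 FCN pipeline, one common implementation flattens the 4-D output and label tensors to 2-D and trains with Adam; the sketch below is a plausible version under that assumption, not necessarily the author's.

def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
    # Flatten (batch, height, width, classes) to (pixels, classes) so a
    # per-pixel softmax cross entropy can be applied.
    logits = tf.reshape(nn_last_layer, (-1, num_classes))
    labels = tf.reshape(correct_label, (-1, num_classes))
    cross_entropy_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    # Adam driven by the learning_rate placeholder defined in run()
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy_loss)
    return logits, train_op, cross_entropy_loss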
Example #4
    def get_screen(self):
        # Assumes module-level imports: numpy as np, helper
        screen = np.load('screen.npy')
        print(screen.shape)
        helper.render_image(screen[0])
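
The screen[0] index suggests screen.npy holds a batch of frames with the frame axis first; it would have been written earlier with something like the line below (the frames variable is hypothetical).

np.save('screen.npy', frames)  # frames: array shaped (n_frames, height, width[, channels])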
Example #5
    # Assumes module-level imports: time, helper, plus Environment and DQNAgent
    # Create the environment
    env = Environment(render=False)
    agent = DQNAgent((320, 160, 4), action_size=3, memory_size=5000)
    agent.load("agent.h5")
    max_score = 146

    for i in range(1000):
        # Get the initial values
        state, score, done = env.start_game()
        a = 0
        # Loop until game over
        while not done:
            action = agent.act(state)
            next_state, score, done = env.act(env.available_actions[action])
            reward = score if not done else -10
            agent.remember(state, action, reward, next_state, done)
            state = next_state
            a += 1
            helper.render_image(state[0])

        print("Game over")
        if score > max_score:
            max_score = score
            agent.save("agent.h5")

        aps = a / (time.time() - env.start_time)
        print("Episode: {}, score:{}, max score: {}, actions/sec: {}".format(
            i, score, max_score, aps))

        agent.replay(2024)
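
The DQNAgent class itself is not shown. Below is a minimal sketch of the interface the loop relies on, assuming a standard epsilon-greedy DQN over a Keras model; the class name DQNAgentSketch and everything inside it are assumptions (load, save, and replay would additionally wrap Keras model persistence and minibatch fitting on the replay buffer).

import random
from collections import deque

import numpy as np

class DQNAgentSketch:
    def __init__(self, state_shape, action_size, memory_size):
        self.action_size = action_size
        self.memory = deque(maxlen=memory_size)  # replay buffer
        self.epsilon = 0.05                      # small, since agent.h5 is pre-trained
        self.model = None                        # assumed Keras CNN built elsewhere

    def act(self, state):
        # Epsilon-greedy: explore occasionally, otherwise pick the best Q-value
        if random.random() < self.epsilon:
            return random.randrange(self.action_size)
        return int(np.argmax(self.model.predict(state)[0]))

    def remember(self, state, action, reward, next_state, done):
        self.memory.append((state, action, reward, next_state, done))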