예제 #1
0
def get_env(name):
    """Build and return the environment selected by *name*.

    'cartpole' -> a Gym CartPole-v0 env with an extended episode cap;
    'pong'     -> a project-local Pong instance.
    Any other name falls through and implicitly returns None.
    """
    if name == 'pong':
        return pong.Pong(800, 600, int(400 / 2), int(200 / 2))
    if name == 'cartpole':
        cartpole = gym.make('CartPole-v0')
        # Raise Gym's default step cap so episodes may run to 500 steps.
        cartpole._max_episode_steps = 500
        return cartpole
예제 #2
0
    def __init__(self, width, height, frame_rate):
        """Create the application window and the Pong game it hosts."""
        # Register with the mouse-driven game framework; the window
        # title is fixed to "Pong".
        game_mouse.Game.__init__(self, "Pong", width, height, frame_rate)
        # The game logic object the framework callbacks will drive.
        self.mPong = pong.Pong(width, height)
예제 #3
0
    def __init__(self, title, width, height, frame_rate):
        """Set up the framework window and attach the Pong game to it."""
        game.Game.__init__(self, title, width, height, frame_rate)
        # Swap in a different game module here to run another game
        # under the same framework.
        self.mGame = pong.Pong(width, height)
예제 #4
0
# Wire the encoder push-button (pad A3) into the UI framework, then set
# the shared display for all screens.
button = digitalio.DigitalInOut(board.A3)
ui.Screen.setEncoder(encoder, button)
ui.Screen.setDisplay(d)

# Main menu screen (True presumably marks it as the initial/current
# screen — TODO confirm against the ui module).
screen = ui.Screen(True)
# NOTE(review): `button` is rebound here from the digitalio pin above to
# a UI widget — later reads of `button` get the widget, not the pin.
button = ui.Button(screen, 38, 32, 38, 16, "Pong", func)
# Frame-rate readout; 1 / loopTime is the FPS of the previous loop pass.
framerate = ui.Text(screen, 0, 8, int(1 / loopTime), max_glyphs=3)
# Four digit selectors laid out in a row around x = 116..164.
num1 = ui.SingleDigitNumberSelector(screen, 116 - 24, 32)
num2 = ui.SingleDigitNumberSelector(screen, 116, 32)
num3 = ui.SingleDigitNumberSelector(screen, 140, 32)
num4 = ui.SingleDigitNumberSelector(screen, 140 + 24, 32)

# Dedicated (widget-free) screen shown while the game is running.
# NOTE(review): this shadows any `pong` module import with a Screen.
pong = ui.Screen()
# back = ui.Button(pong, 128, 32, 38, 16, 11, "Back", func2)

# The game object itself, driven directly by display + encoder.
p = game.Pong(d, encoder)
pPress = False

# Pause-menu screen with its three actions.
pmenu = ui.Screen()
resume = ui.Button(pmenu, 48, 32, 68, 16, "Resume", pResume)
restart = ui.Button(pmenu, 128, 32, 68, 16, "Restart", pRestart)
# NOTE(review): shadows the builtin `exit`; rename if nothing else
# depends on this name.
exit = ui.Button(pmenu, 256 - 48, 32, 68, 16, "Exit", pExit)

# Main Loop: refresh the FPS label, let the UI dispatch events, and
# step the game only while its screen is the current one.
while True:
    # d.setFont(11)
    framerate.label.text = int(1 / loopTime)
    ui.update()
    if ui.Screen.current == pong:
        p.update()
        if ui.Screen.button.value == 0:
예제 #5
0
    # Interior of init_screen (header outside this view): build the
    # turtle window and register the paddle shape before any paddles
    # are created.
    screen = turtle.Screen()
    screen.setup(width, height)
    screen.title("Pong")
    screen.bgcolor("black")
    # 140x20 rectangle centered on the origin, used as the paddle shape.
    rectCors = ((-70, 10), (70, 10), (70, -10), (-70, -10))
    screen.register_shape("rectangle", rectCors)
    return screen


screen = init_screen(
    WIDTH, HEIGHT
)  # Init the Screen, must be before initializing paddles (pongs) because pongs may not create a rectangular shape

# Disable automatic redraws; the game loop drives updates manually.
screen.tracer(0)

# The two paddles, identified by side index 0 / 1.
pong1 = pong.Pong(0)
pong2 = pong.Pong(1)

# Score boards start at 5 — presumably a lives/points-remaining count;
# verify against the score module.
p1_score = score.ScoreBoard(5, (-100, 270))
p2_score = score.ScoreBoard(5, (100, 270))
direction = random.choice(directions)
# NOTE(review): rebinding `ball` shadows the `ball` module, so no
# further Ball instances can be created from it after this line.
ball = ball.Ball(direction)

# Paddle hit-band edges, +/-35 px around the ball's starting y.
# NOTE(review): `pong_1_down` breaks the pongN_up/pongN_down naming
# pattern of the other three — likely a typo; check downstream users
# before renaming.
pong1_up = ball.ycor() + 35
pong_1_down = ball.ycor() - 35
pong2_up = ball.ycor() + 35
pong2_down = ball.ycor() - 35

force = ball.return_force(MAGNITUDE, direction)

des_point = force.desired_point
예제 #6
0
        "--server_port",
        dest="port",
        type=int,
        default=1080,
        help="Port on which host server for camera is running")
    args = parser.parse_args()

    grabber = utility.WebcamVideoStream((args.host, args.port))
    if args.debug: print("Camera Resolution:", grabber.cam_size)
    grabber.start()

    # Create our signal handler and connect it
    handler = SignalHandler(grabber)
    signal.signal(signal.SIGINT, handler)

    game = pong.Pong()

    # Kernal's for image processing
    kernel_morp = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
    kernel_erod = np.ones((3, 3), np.uint8)

    # Parameters for image processing
    threshold = 60  #  BINARY threshold
    blurValue = 51  # GaussianBlur parameter
    bgSubThreshold = 100
    historyL, historyR = 0, 0
    learningRate = 0

    # Camera crop parameters
    cropLY, cropLX = (0, grabber.cam_size[0]), (0, 100)
    cropRY, cropRX = (0, grabber.cam_size[0]), (540, grabber.cam_size[1])
예제 #7
0
File: run_pong.py  Project: ApGa/pong_ml
import pong
import sys

# Usage text shown when arguments are missing or invalid. Implicit
# string concatenation replaces the old backslash-continued "+" chain;
# the resulting text is byte-identical.
commandLineError = (
    "Please enter a command line argument as follows:\n"
    "'user' if you want to play\n"
    "'nn' if you want a neural network to play\n"
    "'generator' if you want a perfect non-ml based bot to play\n"
)

generatorError = (
    "Note: If you choose generator, enter a second argument specifying:\n"
    "'True' if you want it to generate a dataset and 'False' otherwise\n"
)

# Dispatch on the first CLI argument; an unrecognized mode silently
# does nothing (original behavior preserved).
if len(sys.argv) < 2:
    print(commandLineError + generatorError)
elif sys.argv[1] == 'user':
    game = pong.Pong('user')
    game.runLoop()
elif sys.argv[1] == 'nn':
    game = pong.Pong('nn')
    game.runLoop()
elif sys.argv[1] == 'generator':
    # 'generator' needs a second argument: the literal string 'True'
    # or 'False' (passed through to Pong as-is, not parsed to bool).
    if len(sys.argv) < 3:
        print(generatorError)
    elif sys.argv[2] in ('True', 'False'):
        game = pong.Pong('generator', sys.argv[2])
        game.runLoop()
    else:
        print(generatorError)
예제 #8
0
        # Console output thread: drains shared.q_console_output.
        thread_console_output = console.ConsoleOutput(shared.q_console_output)
        thread_console_output.start()

        # Command interpreter thread
        thread_command_interpreter = command_interpreter.CommandInterpreter(
            shared.q_command_interpreter)
        thread_command_interpreter.start()

        # Console input thread — daemonic so it dies with the process
        # instead of blocking shutdown on a pending read.
        thread_console_input = console.ConsoleInput()
        thread_console_input.daemon = True
        thread_console_input.start()

        # Pong thread
        thread_pong = pong.Pong(shared.q_pong)
        thread_pong.start()

        # AI thread
        thread_ai = ai.AI(shared.q_ai)
        thread_ai.start()

        # Player thread
        thread_player = player.Player(shared.q_player)
        thread_player.start()

        # PROGRAM RUNS HERE TILL EXIT

        # wait for all threads to be stopped with their various exit signals
        # (except for threads which are daemonic and will be killed by sys.exit)
        thread_console_output.join()
예제 #9
0
__author__ = 'Matt Geiger'

import pong

# Entry point: build the application object and hand control to its
# main loop.
if __name__ == '__main__':
    pong.Pong().run()
예제 #10
0
def main():
    """Run the DQN agent against Pong, optionally training it.

    Relies on module-level configuration (training_mode, learning_mode,
    exploration/observation schedule, input_dims, K, tau, max_episode)
    defined outside this view.
    """

    # Evaluation: load the saved model and never explore.
    # Training: start fresh with the initial exploration rate.
    if not training_mode:
        agent = Agent(optimizer='rmsprop',
                      mode=learning_mode,
                      load_trained_model=True)
        exploration_rate = 0
    else:
        agent = Agent(optimizer='rmsprop',
                      mode=learning_mode,
                      load_trained_model=False)
        exploration_rate = initial_exploration_rate
    print(agent.action_network.summary())

    episode = 1
    # counter spans episodes: it gates when replay learning starts and
    # schedules target-net syncs / checkpoints.
    counter = 0

    while episode <= max_episode:
        # learning_mode indicate whether it learns from low_dims or high_dims
        game = pong.Pong(mode=learning_mode)
        done = False

        # Per-episode log rows [episode, loss, Q], flushed to CSV below.
        training_history = []
        if learning_mode == 'low_dims':
            # Low-dim feature vector with a batch axis prepended.
            state_1 = np.expand_dims(game.GetPresentFrame(normalize=True),
                                     axis=0)
        elif learning_mode == 'high_dims':
            # Raw frame -> resized grayscale -> binary threshold.
            frame = game.GetPresentFrame()
            frame = cv2.cvtColor(
                cv2.resize(frame, (input_dims[0], input_dims[1])),
                cv2.COLOR_BGR2GRAY)
            ret, frame = cv2.threshold(frame, 1, 255, cv2.THRESH_BINARY)
            # stack frames, that is our input tensor
            state_1 = np.stack((frame, frame, frame, frame), axis=2)
            state_1 = state_1.reshape((1, input_dims[0], input_dims[1], 4))

        while not done:
            # Keep the pygame window responsive; quit closes the process.
            for event in pygame.event.get():
                if event.type == QUIT:
                    sys.exit()

            # Epsilon-greedy action selection over 3 discrete actions.
            Q = agent.action_network.predict(state_1)
            print('Q value is {}'.format(np.max(Q)))
            if np.random.random_sample() > exploration_rate:
                action = np.argmax(Q)
                # print(Q)
            else:
                action = np.random.randint(3)
            # get next state and reward
            if learning_mode == 'low_dims':
                reward, state_2 = game.GetNextFrame(action, normalize=True)
                state_2 = np.expand_dims(state_2, axis=0)
                # Episode ends once the agent has conceded 20+ points.
                if game.total_score < -20:
                    done = True
            elif learning_mode == 'high_dims':
                reward, frame = game.GetNextFrame(action, normalize=True)
                frame = cv2.cvtColor(
                    cv2.resize(frame, (input_dims[0], input_dims[1])),
                    cv2.COLOR_BGR2GRAY)
                ret, frame = cv2.threshold(frame, 1, 255, cv2.THRESH_BINARY)
                frame = np.reshape(frame, (1, input_dims[0], input_dims[1], 1))
                # Slide the 4-frame window: newest frame in front, the
                # oldest of state_1's four channels dropped.
                state_2 = np.append(frame, state_1[:, :, :, 0:3], axis=3)
                if game.total_score < -20:
                    done = True

            if training_mode:
                agent.remember(state_1, action, reward, state_2, done)

                # Replay learning only starts after an observation-only
                # warm-up of observation_steps frames.
                if counter > observation_steps:
                    if counter == observation_steps + 1:
                        print('learning start!')
                    history = agent.experience_replay()

                    if counter % 100 == 0:
                        # NOTE(review): the CSV is rewritten from this
                        # episode's rows only, so earlier episodes'
                        # history is overwritten — confirm intended.
                        training_history.append([episode, history, np.max(Q)])
                        pd.DataFrame(
                            training_history,
                            columns=['episode', 'loss', 'Q']).to_csv(
                                'playing_pong-master/training_history.csv')
                        print('Episode {} : {}th frame: loss {}, Q {}'.format(
                            episode, counter - observation_steps,
                            history * 1000, np.max(Q)))

                    # Sync the target network every K frames.
                    if counter % K == 0:
                        agent.train_target(tau)

                    if counter % 1000 == 0:
                        agent.save_model()
                        print('Made a copy of model')

                # decay exploration rate
                # NOTE(review): decays from the very first frame, i.e.
                # also during the observation warm-up — confirm intended.
                if exploration_rate >= final_exploration_rate:
                    exploration_rate -= exploration_step

            # update state and counter
            counter += 1
            state_1 = state_2

        print('Episode {} : total score is {}'.format(episode,
                                                      game.total_score))
        episode += 1
예제 #11
0
def main():
    spyral.director.push(pong.Pong())