Example #1
0
    def __init__(self):
        """Set up the T4 trading environment: gym spaces, game process, helpers.

        Launches the 'T4v2' game via Serpent and sizes the observation space
        from its GAME_REGION, downscaled by 2 per axis, single channel.
        """
        super(T4Env, self).__init__()

        # Gym requires gym.spaces objects; the agent picks one of two actions.
        self.action_space = spaces.Discrete(2)

        self.game = serpent.initialize_game('T4v2')

        # Observation: half-resolution grayscale image of the game region.
        # NOTE(review): shape is (x-span, y-span, 1); compare Example #8 which
        # orders (height, width, channels) — confirm region tuple layout.
        region = self.game.screen_regions['GAME_REGION']
        span_a = region[2] - region[0]
        span_b = region[3] - region[1]
        self.observation_space = spaces.Box(
            low=0,
            high=255,
            shape=(int(span_a / 2), int(span_b / 2), 1),
            dtype=np.uint8,
        )

        self.game.launch()
        self.input_controller = InputController(game=self.game)

        # Fixed screen coordinates of the sell/buy buttons.
        self.sell_point = (671, 447)
        self.buy_point = (669, 476)

        self.visual_debugger = VisualDebugger()
        self.scraper = T4Scraper(
            game=self.game, visual_debugger=self.visual_debugger)
Example #2
0
    def __init__(self, **kwargs):
        """Base game-agent setup: game handle, config, Redis, frame-handler
        dispatch tables, sprite identification and analytics.

        ``kwargs`` must provide at least ``game`` and ``input_controller``.
        """
        super().__init__(**kwargs)

        self.game = kwargs["game"]
        # Bare attribute access — presumably forces lazy construction of the
        # game's API object; TODO confirm against the Game class before removing.
        self.game.api

        # Plugin-specific config section named after the concrete subclass.
        self.config = config.get(f"{self.__class__.__name__}Plugin") or dict()

        self.redis_client = StrictRedis(**config["redis"])

        self.input_controller = kwargs["input_controller"]
        self.machine_learning_models = dict()

        # Frame-handler dispatch: handler name -> bound method.
        self.frame_handlers = dict(
            NOOP=self.handle_noop,
            COLLECT_FRAMES=self.handle_collect_frames,
            COLLECT_FRAME_REGIONS=self.handle_collect_frame_regions,
            COLLECT_FRAMES_FOR_CONTEXT=self.handle_collect_frames_for_context,
            RECORD=self.handle_record)

        # One-time setup routines per frame handler (NOOP needs none).
        self.frame_handler_setups = dict(
            COLLECT_FRAMES=self.setup_collect_frames,
            COLLECT_FRAME_REGIONS=self.setup_collect_frame_regions,
            COLLECT_FRAMES_FOR_CONTEXT=self.setup_collect_frames_for_context,
            RECORD=self.setup_handle_record)

        # Callbacks invoked when a frame handler is paused.
        self.frame_handler_pause_callbacks = dict(
            COLLECT_FRAMES=self.on_collect_frames_pause,
            COLLECT_FRAME_REGIONS=self.on_collect_frame_regions_pause,
            COLLECT_FRAMES_FOR_CONTEXT=self.
            on_collect_frames_for_context_pause,
            RECORD=self.on_record_pause)

        self.reward_functions = dict(TEST=self.reward_test)

        # Tracks whether the active frame handler's setup has already run.
        self.frame_handler_setup_performed = False

        self.visual_debugger = VisualDebugger()

        # Rolling buffer of recent game frames (configurable capacity, default 5).
        self.game_frame_buffer = GameFrameBuffer(
            size=self.config.get("game_frame_buffer_size", 5))
        self.game_context = None

        self.sprite_identifier = SpriteIdentifier()
        self._register_sprites()

        self.analytics_client = AnalyticsClient(
            project_key=config["analytics"]["topic"])

        if config["analytics"]["broadcast"]:
            self.analytics_client.track(event_key="RESET_DASHBOARD", data={})

        self.flag = None

        # Unique id and start timestamp for this agent run.
        self.uuid = str(uuid.uuid4())
        self.started_at = datetime.now()

        # Keep raw kwargs around for subclasses.
        self.kwargs = kwargs
Example #3
0
    def __init__(
        self,
        input_shape=None,
        input_mapping=None,
        replay_memory_size=10000,
        batch_size=32,
        action_space=None,
        max_steps=1000000,
        observe_steps=None,
        initial_epsilon=1.0,
        final_epsilon=0.1,
        gamma=0.99,
        model_file_path=None,
        model_learning_rate=2.5e-4,
        override_epsilon=False
    ):
        """Configure a DQN agent: replay memory, action mapping, epsilon-greedy
        schedule and the underlying model (optionally restoring saved weights).
        """
        self.type = "DQN"

        # Input and replay-memory configuration.
        self.input_shape = input_shape
        self.batch_size = batch_size
        self.replay_memory = ReplayMemory(memory_size=replay_memory_size)

        # Action-space bookkeeping (count and input-event mapping).
        self.action_space = action_space
        self.action_count = len(self.action_space.combinations)
        self.action_input_mapping = self._generate_action_space_combination_input_mapping(input_mapping)

        self.frame_stack = None

        # Step schedule: by default observe for 10% of the replay memory size.
        self.max_steps = max_steps
        self.observe_steps = observe_steps if observe_steps else 0.1 * replay_memory_size
        self.current_observe_step = 0
        self.current_step = 0

        # Epsilon-greedy exploration: anneal from initial to final over max_steps.
        self.initial_epsilon = initial_epsilon
        self.final_epsilon = final_epsilon
        self.previous_epsilon = initial_epsilon
        self.epsilon_greedy_q_policy = EpsilonGreedyQPolicy(
            initial_epsilon=self.initial_epsilon,
            final_epsilon=self.final_epsilon,
            max_steps=self.max_steps
        )

        self.gamma = gamma

        # Most recent action (index/type populated while stepping).
        self.current_action = None
        self.current_action_index = None
        self.current_action_type = None

        self.first_run = True
        self.mode = "OBSERVE"

        # Build the model; optionally restore weights (and epsilon) from disk.
        self.model_learning_rate = model_learning_rate
        self.model = self._initialize_model()
        if model_file_path is not None:
            self.load_model_weights(model_file_path, override_epsilon)
        self.model_loss = 0

        self.visual_debugger = VisualDebugger()
Example #4
0
    def __init__(self, **kwargs):
        """Register the PLAY frame handler and cache sprite/state helpers."""
        super().__init__(**kwargs)

        # Hook PLAY mode into the base agent's dispatch tables.
        self.frame_handlers['PLAY'] = self.handle_play
        self.frame_handler_setups['PLAY'] = self.setup_play

        self.value = None

        # Sprites used to detect game-over, game-won and the player character.
        game_sprites = self.game.sprites
        self.spriteGO = game_sprites.get('SPRITE_GAME_OVER')
        self.spriteWO = game_sprites.get('SPRITE_GAME_WON')
        self.spriteGirl = game_sprites.get('SPRITE_BETTY_0')

        self.printer = TerminalPrinter()
        self.visual_debugger = VisualDebugger()
        self.gamestate = Game()
Example #5
0
    def __init__(self):
        """Set up the T4 gym environment with a placeholder observation space."""
        super(T4Env, self).__init__()

        # Gym requires gym.spaces objects; two discrete actions here.
        self.action_space = spaces.Discrete(2)

        # NOTE(review): shape=(0, 0, 0) looks like a placeholder image space,
        # presumably resized elsewhere — confirm with callers.
        self.observation_space = spaces.Box(
            low=0, high=255, shape=(0, 0, 0), dtype=np.uint8)

        self.game = serpent.initialize_game('T4v2')
        self.game.launch()
        self.input_controller = InputController(game=self.game)

        # Fixed screen coordinates of the sell/buy buttons.
        self.sell_point = (671, 447)
        self.buy_point = (669, 476)

        self.visual_debugger = VisualDebugger()
        self.scraper = T4Scraper(
            game=self.game, visual_debugger=self.visual_debugger)
    def __init__(self, size=5):
        """Create an empty frame buffer holding at most ``size`` frames."""
        self.size = size
        self.frames = []
        self.visual_debugger = VisualDebugger()
Example #7
0
    def __init__(self, buckets=None):
        """Set up a visual debugger (optionally scoped to ``buckets``); no canvas yet."""
        super().__init__()

        self.canvas = None
        self.visual_debugger = VisualDebugger(buckets=buckets)
Example #8
0
    def __init__(self, fake=False, metrics_key='001'):
        """Trading environment setup: launch the T4TF1 game, size the
        observation specs from a live frame grab, and wire scraping,
        window-control and metrics helpers.

        When ``fake`` is True, only the placeholder 10x10 specs are built and
        no game process, input controller or metrics state is touched.
        """
        # Record our PID so external tooling can find this process.
        with open('running', 'w') as f:
            f.write(str(os.getpid()))

        self._episode_ended = False

        self.game = serpent.initialize_game('T4TF1')

        # NOTE(review): game_frame is read but never used; the 10x10 size below
        # is a placeholder that gets replaced from a real frame grab further down.
        game_frame = self.game.screen_regions['GAME_REGION']
        self.width = 10
        self.height = 10

        # Placeholder tf-agents specs (half-resolution, single channel).
        self.state_shape = (int(self.height / 2), int(self.width / 2), 1)
        self._action_spec = array_spec.BoundedArraySpec(
            shape=(), dtype=np.int32, minimum=0, maximum=1, name='action')
        self._observation_spec = array_spec.BoundedArraySpec(
            shape=self.state_shape, dtype=np.float32, minimum=0.0, name='observation')

        self._state = np.zeros(self.state_shape).astype(np.float32)

        # Fake mode: stop before any further side effects.
        if fake:
            return
        self.interrupted = False

        self.game.launch()
        self.game.start_frame_grabber()
        self.input_controller = InputController(game=self.game)
        # self.input_proc =

        # Grab one real frame to derive the true observation dimensions.
        self.frame_buffer = FrameGrabber.get_frames([0])
        self.frame_buffer = self.extract_game_area(self.frame_buffer)

        self.width = self.frame_buffer[0].shape[1]
        self.height = self.frame_buffer[0].shape[0]
        print('width: %d' % self.width)
        print('height: %d' % self.height)
        # Rebuild the specs with the actual (height, width, 3) frame shape.
        self.state_shape = (self.height, self.width, 3)
        self._action_spec = array_spec.BoundedArraySpec(
            shape=(), dtype=np.int32, minimum=0, maximum=1, name='action')
        self._observation_spec = array_spec.BoundedArraySpec(
            shape=self.state_shape, dtype=np.float32, minimum=0.0, name='observation')

        self._state = np.zeros(self.state_shape).astype(np.float32)

        # print('created input with pid: %s' % self.input_proc.pid)
        # Keyboard chords sent to the game for sell / buy / step actions.
        self.sell_keys = [KeyboardKey.KEY_LEFT_SHIFT, KeyboardKey.KEY_LEFT_CTRL, KeyboardKey.KEY_S]
        self.buy_keys = [KeyboardKey.KEY_LEFT_SHIFT, KeyboardKey.KEY_LEFT_CTRL, KeyboardKey.KEY_B]
        self.step_keys = [KeyboardKey.KEY_LEFT_SHIFT, KeyboardKey.KEY_LEFT_CTRL, KeyboardKey.KEY_F]

        self.visual_debugger = VisualDebugger()

        self.scraper = T4Scraper(game=self.game, visual_debugger=self.visual_debugger)
        frame = self.game.grab_latest_frame()
        self.scraper.current_frame = frame

        # Trading bookkeeping: profit/loss, pending trade and action state.
        self.pl = 0
        self.working_trade = 0
        self.current_action = ''
        self.held = False
        self.fill_count = 0

        # Locate the trading window by title (Mini-Dow contract).
        self.window_controller = WindowController()
        self.window_id = self.window_controller.locate_window(".*Mini-Dow .*")
        # self.window_id = self.window_controller.locate_window(".*S&P .*")

        self.keys = RedisKeys(metrics_key)
#         self.redis = redis.Redis(port=6001)

        # Session statistics.
        self.number_of_trades = 0
        self.number_of_wins = 0
        self.buys = 0
        self.sells = 0
        self.holds = 0
        self.history = list()
        self.actions = 0
        self.last_action = ''

        self.previous_write = -1
        self.get_metadata()

        self.active_frame = None

        self.start_time = time.time()

        # Cumulative per-step read/write timing accumulators.
        self.step_read_time = 0
        self.step_write_time = 0
Example #9
0
import os
import pickle
import subprocess

import skimage.io
import skimage.transform
import skimage.color
import skimage.filters

import numpy as np

import sklearn.svm

from serpent.config import config

# Module-level debugger instance.
# NOTE(review): VisualDebugger is not imported in this snippet's visible
# import block — confirm where it comes from.
visual_debugger = VisualDebugger()


class SVMContextClassifier(ContextClassifier):
    def __init__(self, input_shape=None):
        """Store the expected input shape and defer the rest to the base class."""
        super().__init__()

        # Shape of the image samples this classifier is trained on.
        self.input_shape = input_shape

    def train(self, epochs=3, preprocessing_func=None):
        data = list()
        targets = list()

        for root, dirs, files in os.walk("datasets/current/training"):
            if root != "datasets/current/training":
                current_label = root.split("/")[-1]