Example #1
class DeviceManager:
    def __init__(self, fan_controller: FanController):
        self._data_manager = DataManager()
        self._fan_controller = fan_controller
        self._poe_device = _init_poe_device(fan_controller=fan_controller)
        self._lcd_display = _init_lcd_display()

        self._data_manager.register(self._fan_controller)
        self._data_manager.register(self._poe_device)
        self._data_manager.register(self._lcd_display)
Example #2
class DeviceManager:
    def __init__(self, fan_controller: FanController):
        self._data_manager = DataManager()
        self._fan_controller = fan_controller
        self._poe_device = _init_poe_device(fan_controller=fan_controller)
        self._lcd_display = _init_lcd_display()

        self._data_manager.register(self._fan_controller)
        self._data_manager.register(self._poe_device)
        self._data_manager.register(self._lcd_display)

    def splash(self):
        self._lcd_display.splash()

    def update(self):
        self._data_manager.update()

    def clear(self):
        self._poe_device.clear()
        self._lcd_display.clear()

    def stop(self):
        self._fan_controller.force_on()
        self._poe_device.clear()
        self._lcd_display.clear()
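
A minimal usage sketch for the class above; the import path and the
FanController constructor call are assumptions, since only the class body is
shown here:

from devices import DeviceManager, FanController  # hypothetical import path

devices = DeviceManager(FanController())
devices.splash()  # show the boot screen on the LCD
try:
    while True:
        devices.update()  # delegates to DataManager.update()
except KeyboardInterrupt:
    devices.stop()  # force the fan on and blank the PoE and LCD outputs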
Example #3
def create_pve_combat(player: Player) -> CombatTeam:
    if player.battles_fought < 2:
        opponent = BattleManager._tutorial_opponent(player)
    else:
        opponent = BattleManager._get_random_opponent(player)
    player_team = CombatTeam.from_team(player.team)
    # Combat is constructed for its side effects; the instance is not kept.
    Combat([player_team], [opponent], data_manager=DataManager())
    return player_team
Example #4
def create_duel(player: Player, other_player: Player) -> None:
    """
    Start a fight between two players.
    """
    Combat([CombatTeam.from_team(player.team)],
           [CombatTeam.from_team(other_player.team)],
           data_manager=DataManager(),
           allow_flee=False,
           allow_items=False)
Example #5
import numpy as np
import pandas as pd


def split_data(raw_data_path, path_to_save, split_parts=100):
    """
    Args:
        raw_data_path: path to the original csv data
        path_to_save: path to the output .hdf5 file
        split_parts: number of parts to split the csv into
    """

    saver = DataManager(path_to_save)

    TIMESTEPS_IN_ALL = 629145480  # total number of rows in the raw csv
    counter = 0
    for chunk in pd.read_csv(raw_data_path,
                             chunksize=TIMESTEPS_IN_ALL // split_parts):
        data: np.ndarray = chunk.values
        signal = (data[:, 0]).astype('int16')
        time_left = (data[:, 1]).astype('float32')

        name_group = "0" * (4 - len(str(counter))) + str(counter)
        saver.push(name_group + "/signal", signal, 'int16')
        saver.push(name_group + "/time_left", time_left, 'float32')

        counter += 1
        print("{} / {} blocks were processed".format(counter, split_parts))
        if counter == split_parts:  # skip the final remainder chunk, if any
            break
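
A sketch of how split_data might be invoked; the paths below are placeholders
(the .hdf5 output mirrors the interim path read back in Example #13):

if __name__ == "__main__":
    split_data("../../data/raw/train.csv",
               "../../data/interim/train.hdf5",
               split_parts=100)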
Example #6
async def _finish_purchase(self) -> None:
    for selected_item in self._selected_values:
        self.shop.buy(selected_item, self.player)
    self.toggled = []
    data_manager = DataManager()
    data_manager.update_player(self.player)
    data_manager.update_inventory(self.player)
    await self.render()
Example #7
async def _confirm(self):
    if self.player.num_elementals > 0 or self._selected_value is None:
        return
    starter = self._selected_value
    elemental = ElementalInitializer.make(starter, level=self.player.level)
    self.player.add_elemental(elemental)
    data_manager = DataManager()
    data_manager.update_player(self.player)
    data_manager.update_elemental(elemental)
    await Form.from_form(self, StatusView)
Example #8
import hashlib

import numpy as np


def make_data(loader: DataManager, saver: DataManager, part_name, timesteps,
              step):
    signal = loader.get(part_name + '/signal')
    time_left = loader.get(part_name + '/time_left')

    start = 0
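    # Slide a window of `timesteps` samples over the signal, advancing by
    # `step` each iteration; windows overlap whenever step < timesteps.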
    while True:
        stop = start + timesteps
        try:
            signal_part: np.ndarray = signal[start:stop]
            time_value = time_left[stop]
        except IndexError:
            print("Process of block {} was finished".format(part_name))
            break
        signal_mean = np.mean(signal_part)
        fourier_part = np.abs(np.fft.rfft(signal_part - signal_mean))

        name = hashlib.md5(signal_part.tobytes()).hexdigest()
        saver.push(name + '/signal', signal_part, 'int16')
        saver.push(name + '/fourier', fourier_part, 'float32')
        saver.push(name + '/time', time_value, 'float32')

        start += step
Example #9
def _save(self) -> None:
    data_manager = DataManager()
    data_manager.update_elemental(self.elemental)
Example #10
from typing import Optional

import discord
from discord.ext import commands

from src.combat.battle_manager import BattleManager
from src.data.data_manager import DataManager
from src.discord_token import TOKEN
from src.shop.general_shop import GeneralShop
from src.ui.view_router import ViewRouter

description = "Collect elementals and battle them!"
bot = commands.Bot(command_prefix=';', description=description)
client = discord.Client()
view_manager: Optional[ViewRouter] = None
battle_manager: BattleManager = BattleManager()
data_manager: DataManager = DataManager()


@bot.event
async def on_ready():
    global view_manager
    view_manager = ViewRouter(bot)
    print("Monbot is ready!")


@bot.command(pass_context=True)
async def menu(ctx):
    user = ctx.message.author
    await view_manager.delete_message(ctx.message)
    if user.bot:
        return
    player = data_manager.get_created_player(user)
Example #11
async def _show_versus(self) -> None:
    options = VersusFormOptions(self.bot, self.player, DataManager(),
                                self.discord_message.server,
                                self.discord_message, self)
    await VersusForm(options).show()
Example #12
async def _show_summon(self) -> None:
    options = SummonMenuOptions(self.bot, self.player, DataManager(),
                                self.discord_message, self)
    await SummonMenu(options).show()
Example #13
        saver.push(name + '/signal', signal_part, 'int16')
        saver.push(name + '/fourier', fourier_part, 'float32')
        saver.push(name + '/time', time_value, 'float32')

        start += step


if __name__ == "__main__":
    ORIGINAL_DATA_PATH = "../../data/interim/train.hdf5"
    TRAIN_DATA_PATH = "../../data/processed/train.hdf5"
    VALID_DATA_PATH = "../../data/processed/valid.hdf5"

    TIMESTEPS = 150000
    STEP = 30000

    load_manager = DataManager(ORIGINAL_DATA_PATH)
    train_manager = DataManager(TRAIN_DATA_PATH)
    valid_manager = DataManager(VALID_DATA_PATH)

    train_parts, valid_parts = train_test_split(load_manager.names_from(),
                                                test_size=0.1,
                                                random_state=13)

    for n, name in enumerate(train_parts):
        make_data(load_manager, train_manager, name, TIMESTEPS, STEP)
        print("{} / {} for train data set was processed".format(
            n, len(train_parts)))

    for n, name in enumerate(valid_parts):
        make_data(load_manager, valid_manager, name, TIMESTEPS, STEP)
        print("{} / {} for valid data set was processed".format(
Example #14
async def _select_leader(self) -> None:
    elemental = self._selected_value
    self.player.team.set_leader(elemental)
    DataManager().update_player(self.player)  # TODO patch
    self._selecting_leader_mode = False
    await self.render()
Example #15
def _save(self) -> None:
    data_manager = DataManager()
    data_manager.update_inventory(self.player)
    data_manager.update_elemental(self.recently_affected_elemental)
Example #16
def train(params, logger):
    cur_ckpt_dir = os.path.join(params.model_dir, params.cur_ckpt_dir)
    cur_checkpoint_path = os.path.join(cur_ckpt_dir,
                                       "cur_ner_model.trained_ckpt")
    # best_ckpt_dir = os.path.join(params.model_dir, params.best_ckpt_dir)
    best_checkpoint_path = os.path.join(cur_ckpt_dir, "ner_model.trained_ckpt")

    # get pre-trained embeddings
    embeddings = data_utils.get_trimmed_glove_vectors(params.trimmed_file)
    print(embeddings.shape)

    # load vocabs
    words_vocab_to_id, words_id_to_vocab, _, words_vocab_size = \
        vocab_utils.create_vocabulary_mapping(params.vocab_words_file)
    chars_vocab_to_id, chars_id_to_vocab, _, chars_vocab_size = \
        vocab_utils.create_vocabulary_mapping(params.vocab_chars_file)
    tags_vocab_to_id, tags_id_to_vocab, _, tags_vocab_size = \
        vocab_utils.create_vocabulary_mapping(params.vocab_tags_file)

    params.model_params["count_vocab_source"] = words_vocab_size
    params.model_params["count_vocab_char"] = chars_vocab_size
    params.model_params["count_vocab_target"] = tags_vocab_size

    # create dataset
    eval_data = DataManager(CoNLLDataset(
        params.eval_file,
        lowercase=True,
        use_char=params.model_params["use_char"]),
                            words_vocab_to_id,
                            chars_vocab_to_id,
                            tags_vocab_to_id,
                            params.batch_size,
                            use_char=params.model_params["use_char"],
                            tag_scheme=params.tag_scheme)
    test_data = DataManager(CoNLLDataset(
        params.test_file,
        lowercase=True,
        use_char=params.model_params["use_char"]),
                            words_vocab_to_id,
                            chars_vocab_to_id,
                            tags_vocab_to_id,
                            params.batch_size,
                            use_char=params.model_params["use_char"],
                            tag_scheme=params.tag_scheme)
    train_data = DataManager(CoNLLDataset(
        params.train_file,
        lowercase=True,
        use_char=params.model_params["use_char"]),
                             words_vocab_to_id,
                             chars_vocab_to_id,
                             tags_vocab_to_id,
                             params.batch_size,
                             use_char=params.model_params["use_char"],
                             tag_scheme=params.tag_scheme)
    len_train_data = train_data.data_size
    nbatches = (len_train_data + params.batch_size - 1) // params.batch_size

    # create model
    model_creator = locate(params.model)
    train_model = model_helper.create_model(
        model_creator,
        params.model_params,
        mode=tf.contrib.learn.ModeKeys.TRAIN)
    eval_model = model_helper.create_model(model_creator,
                                           params.model_params,
                                           mode=tf.contrib.learn.ModeKeys.EVAL)
    infer_model = model_helper.create_model(
        model_creator,
        params.model_params,
        mode=tf.contrib.learn.ModeKeys.INFER)
    # get config proto
    config_proto = misc_utils.get_config_proto(params.log_device_placement,
                                               params.allow_soft_placement)

    train_sess = tf.Session(config=config_proto, graph=train_model.graph)
    eval_sess = tf.Session(config=config_proto, graph=eval_model.graph)
    infer_sess = tf.Session(config=config_proto, graph=infer_model.graph)

    with train_model.graph.as_default():
        loaded_train_model, global_step = model_helper.create_or_load_model(
            train_sess,
            train_model.model,
            cur_ckpt_dir,
            name="train",
            loaded_vector=embeddings)

    # start run epoch
    # losses = []
    best_score = 0
    nepoch_no_imprv = 0
    for epoch in range(params.num_epochs):
        prog = Progbar(target=nbatches)
        for i, batched_data in enumerate(train_data.minibatches()):
            # batched_data = train_data.pad_batch(words, labels)
            _, loss, global_step = loaded_train_model.train(
                train_sess, batched_data)
            prog.update(i + 1, [("train loss", loss)])
        # save the current checkpoint at the end of each epoch
        loaded_train_model.saver.save(train_sess, cur_checkpoint_path)

        logger.info(" epoch {} finished.".format(epoch + 1))
        logger.info(" start internal evaluate")
        eval_loss, test_loss = run_internal_evaluate(eval_sess, eval_model,
                                                     cur_ckpt_dir, eval_data,
                                                     test_data)
        logger.info([
            " eval_loss:{:04.3f}".format(eval_loss),
            " test_loss:{:04.3f} ".format(test_loss)
        ])

        logger.info(" start external evaluate")
        eval_lines, test_lines = run_external_evaluate(infer_sess, infer_model,
                                                       cur_ckpt_dir,
                                                       words_id_to_vocab,
                                                       tags_id_to_vocab,
                                                       eval_data, test_data)

        for line in eval_lines:
            logger.info(line)
        if test_lines is not None:
            for line in test_lines:
                logger.info(line)
        eval_f1 = float(eval_lines[1].strip().split()[-1])

        if eval_f1 > best_score:
            nepoch_no_imprv = 0
            loaded_train_model.saver.save(train_sess,
                                          best_checkpoint_path,
                                          global_step=epoch + 1)
            best_score = eval_f1
            logger.info("- new best score!")
        else:
            nepoch_no_imprv += 1
            if nepoch_no_imprv >= params.nepoch_no_imprv:
                logger.info(
                    "- early stopping {} epochs without improvement".format(
                        nepoch_no_imprv))
                break
Example #17
def _save(self) -> None:
    # TODO patch
    DataManager().update_elemental(self.elemental)