Example 1
from tensorflow.keras.models import save_model
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# make_model, MODEL_PATH, IM_HEIGHT, IM_WIDTH and CLASSES are module-level
# definitions elsewhere in the same file.
def train(batch_size=32, load_weights=True, data_dir="data/train", val_dir="data/validation", epochs=1):
    model = make_model()
    model.summary()
    if load_weights:
        model.load_weights(MODEL_PATH)

    # Augment the training data; white (cval=255) fills the borders exposed
    # by rotation and shifting.
    train_datagen = ImageDataGenerator(
        rotation_range=10,
        width_shift_range=0.1,
        height_shift_range=0.1,
        rescale=1.0 / 255,
        shear_range=0.02,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode="constant",
        cval=255,
    )
    # Validation data is only rescaled, never augmented, so the reported
    # metrics reflect the real input distribution.
    val_datagen = ImageDataGenerator(rescale=1.0 / 255)

    train_gen = train_datagen.flow_from_directory(
        directory=data_dir, target_size=(IM_HEIGHT, IM_WIDTH), batch_size=batch_size, classes=CLASSES
    )
    val_gen = val_datagen.flow_from_directory(
        directory=val_dir, target_size=(IM_HEIGHT, IM_WIDTH), batch_size=batch_size, classes=CLASSES
    )
    # fit_generator is deprecated; model.fit accepts generators directly.
    model.fit(train_gen, validation_data=val_gen, epochs=epochs)
    save_model(model, MODEL_PATH)
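
The 1/255 rescale baked into the generators has to be reproduced at inference time, or the network sees inputs on a different scale than it was trained on. A minimal prediction sketch under that assumption; predict_image is a hypothetical helper reusing the module-level constants from the snippet above:

import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image

# Hypothetical helper; MODEL_PATH, IM_HEIGHT, IM_WIDTH and CLASSES are the
# same module-level constants train() relies on.
def predict_image(img_path):
    model = load_model(MODEL_PATH)
    img = image.load_img(img_path, target_size=(IM_HEIGHT, IM_WIDTH))
    x = image.img_to_array(img) / 255.0  # must match the generator's rescale
    probs = model.predict(np.expand_dims(x, axis=0))[0]
    return CLASSES[int(np.argmax(probs))]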
Example 2
    # Requires: from datetime import datetime; import pickle;
    # and save_model from keras.models.
    def save(self, keras_model, fold=None):
        date_time = datetime.now().strftime("%Y-%m-%d_%H%M%S")
        # "if fold:" would skip fold 0, so test against None explicitly.
        if fold is not None:
            f_name = f"trained_models/relationship_clf_fold_{fold}_{date_time}"
        else:
            f_name = f"trained_models/relationship_clf_{date_time}"

        # The Keras model goes to its own HDF5 file; the (model-free)
        # wrapper object is pickled next to it.
        save_model(keras_model, f_name + '.h5')

        with open(f_name + '.pkl', 'wb') as f_out:
            pickle.dump(self, f_out)
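
Restoring one of these classifiers takes two steps, since the pickle holds only the model-free wrapper: unpickle the wrapper, then re-attach the Keras model from the sibling .h5 file. A minimal sketch, assuming the wrapper keeps its network on a model attribute (hypothetical name):

import pickle
from tensorflow.keras.models import load_model

# Hypothetical counterpart to save(); f_name is the base path without
# extension, and the `model` attribute name is an assumption.
def load_classifier(f_name):
    with open(f_name + '.pkl', 'rb') as f_in:
        clf = pickle.load(f_in)
    clf.model = load_model(f_name + '.h5')
    return clf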
Example 3
    def save(self, uri):
        """
        Saves the model under a given base filename.
        The model uses four files: one for the encoder, one for the decoder,
        one for the autoencoder, and one for the class options in JSON format.
        :param uri: base filename.
        """
        pf = PyFolder(os.path.dirname(os.path.realpath(uri)), allow_override=True)
        pf[os.path.basename(uri)+"_options.json"] = {
            'input_cells': self._input_cells,
            'latent_space': self._latent_space,
        }

        save_model(self._autoencoder, uri + "_lstm_autoencoder.hdf5")
        save_model(self._encoder, uri + "_lstm_encoder.hdf5")
        # The docstring promises a decoder file as well; the attribute name
        # below is assumed to follow the same pattern.
        save_model(self._decoder, uri + "_lstm_decoder.hdf5")
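
A hedged sketch of the inverse operation, reading the options and the per-network HDF5 files back. Plain json is used here rather than assuming PyFolder's read API, and rebuilding a full object from the options would depend on the class constructor, so this only returns the pieces:

import json
from tensorflow.keras.models import load_model

# Hypothetical counterpart to save(); assumes the file suffixes used above.
def load_lstm_autoencoder(uri):
    with open(uri + "_options.json") as f_in:
        options = json.load(f_in)
    autoencoder = load_model(uri + "_lstm_autoencoder.hdf5")
    encoder = load_model(uri + "_lstm_encoder.hdf5")
    decoder = load_model(uri + "_lstm_decoder.hdf5")
    return options, autoencoder, encoder, decoder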
Example 4
    def save_data_on_disk(self):
        # Persist vocabulary, training and validation data.
        store_vocabulary(self.vocabulary_dir, self.vocabulary,
                         self.word_index_dict, self.index_word_dict,
                         self.max_cap_len)
        store_train_data(self.train_dir, self.train_captions,
                         self.train_images_as_vector)
        # The original passed self.train_dir here, which looks like a
        # copy-paste slip; the validation directory is the likely intent.
        store_val_data(self.val_dir, self.val_captions,
                       self.val_images_as_vector)
        save_model(self.model, self.model_file)
        with open(self.dataset_name_file, "w") as f:
            f.write(self.dataset.get_name())
        with open(self.last_epoch_file, "w") as f:
            f.write(str(self.last_epoch))
        with open(self.total_epoch_file, "w") as f:
            f.write(str(self.total_epochs))
Example 5
def _train_until_last_epoch(model, x_train, y_train, config, best_epoch=None):
    # Final run on the full training set: no checkpointing, no early
    # stopping, and no histograms, since there is no validation data.
    config.checkpoint = False
    config.early_stopping = False
    config.histogram_freq = 0
    config.n_epochs = best_epoch
    save_cfg(config)

    callbacks_list = get_pib_callbacks(config)
    model.fit(x_train,
              y_train,
              batch_size=config.batch_size,
              epochs=config.n_epochs,
              verbose=config.verbose,
              callbacks=callbacks_list,
              validation_split=0.0,
              shuffle=False)
    # Mimic the checkpoint filename pattern; 0 stands in for the
    # validation loss, which does not exist in this run.
    ckpt_path = config.checkpoint_path + "/full.{:02d}-{:.5f}.hdf5".format(
        best_epoch, 0)
    save_model(model, ckpt_path, overwrite=True, include_optimizer=True)
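
Because the checkpoint is written with include_optimizer=True, the optimizer state (moment estimates, iteration counters) is stored alongside the weights, so a later load_model can continue training without recompiling. A minimal sketch; the extra epoch count is illustrative:

from tensorflow.keras.models import load_model

# Resume from the full checkpoint saved above; the restored optimizer
# picks up where the previous fit() stopped.
model = load_model(ckpt_path)
model.fit(x_train, y_train, batch_size=config.batch_size, epochs=5)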
Example 6
    def __getstate__(self):
        # Serialize the Keras model through a temporary HDF5 file so that
        # instances become picklable; `temp` is presumably an alias for
        # tempfile.NamedTemporaryFile.
        with temp(suffix='.hdf5', delete=True) as f:
            save_model(self, f.name, overwrite=True)
            model_str = f.read()
        return {'model_str': model_str}
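
__getstate__ covers only the serialization half; unpickling needs a matching __setstate__ that writes the stored bytes back to a temporary HDF5 file and rebuilds the model from it. A minimal sketch following the same widely used make-Keras-picklable recipe, assuming the same temp helper and that load_model is imported:

    def __setstate__(self, state):
        # Round-trip the HDF5 bytes through a temporary file, then adopt
        # the loaded model's internals.
        with temp(suffix='.hdf5', delete=True) as f:
            f.write(state['model_str'])
            f.flush()
            model = load_model(f.name)
        self.__dict__ = model.__dict__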
Example 7
from datetime import datetime
from statistics import mean
from typing import Iterable

from tensorflow.keras.models import save_model
from tqdm import tqdm

# Tetris, DQNAgent, CustomTensorBoard and AgentConf are project-local.


def dqn(conf: AgentConf):
    env = Tetris()

    agent = DQNAgent(env.get_state_size(),
                     n_neurons=conf.n_neurons,
                     activations=conf.activations,
                     epsilon=conf.epsilon,
                     epsilon_min=conf.epsilon_min,
                     epsilon_stop_episode=conf.epsilon_stop_episode,
                     mem_size=conf.mem_size,
                     discount=conf.discount,
                     replay_start_size=conf.replay_start_size)

    timestamp_str = datetime.now().strftime("%Y%m%d-%H%M%S")
    log_dir = f'logs/tetris-{timestamp_str}-ms{conf.mem_size}-e{conf.epochs}-ese{conf.epsilon_stop_episode}-d{conf.discount}'
    log = CustomTensorBoard(log_dir=log_dir)

    print(f"AGENT_CONF = {log_dir}")

    scores = []

    episodes_wrapped: Iterable[int] = tqdm(range(conf.episodes))
    for episode in episodes_wrapped:
        current_state = env.reset()
        done = False
        steps = 0

        # update render flag
        render = bool(conf.render_every) and episode % conf.render_every == 0

        # game
        while not done and (not conf.max_steps or steps < conf.max_steps):
            next_states = env.get_next_states()
            best_state = agent.best_state(next_states.values())

            # find the action that corresponds to the best state
            best_action = None
            for action, state in next_states.items():
                if state == best_state:
                    best_action = action
                    break

            reward, done = env.hard_drop([best_action[0], 0],
                                         best_action[1],
                                         render=render)

            agent.add_to_memory(current_state, next_states[best_action],
                                reward, done)
            current_state = next_states[best_action]
            steps += 1

        # record this episode's score
        scores.append(env.get_game_score())

        # train
        if episode % conf.train_every == 0:
            agent.train(batch_size=conf.batch_size, epochs=conf.epochs)

        # logs
        if conf.log_every and episode and episode % conf.log_every == 0:
            avg_score = mean(scores[-conf.log_every:])
            min_score = min(scores[-conf.log_every:])
            max_score = max(scores[-conf.log_every:])
            log.log(episode,
                    avg_score=avg_score,
                    min_score=min_score,
                    max_score=max_score)
    # save the trained model
    save_model(agent.model,
               f'{log_dir}/model.hdf',
               overwrite=True,
               include_optimizer=True)
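
A hedged sketch of reusing the saved network later without the full DQNAgent: reload it with load_model and replicate the greedy selection the agent performs, scoring each candidate next state and taking the argmax. The batching of the state list is an assumption about the network's input layout:

import numpy as np
from tensorflow.keras.models import load_model

# Hypothetical evaluation-time counterpart of agent.best_state().
value_net = load_model(f'{log_dir}/model.hdf')

def best_state(states):
    batch = np.array(list(states))
    values = value_net.predict(batch).flatten()
    return batch[int(np.argmax(values))]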
Example 8
    def _save_model(self):
        save_model(self.model, self.model_name)