Example #1
    def run_encoding_algorithm(self, text):
        """Run LZW on the Huffman-compressed data."""
        huf_res = HuffmanCompress(text)
        lzw_res = LZW(huf_res.compress_data())
        res = lzw_res.encode_text()
        self.huf_codes = huf_res.codes_dict
        self.lzw_codes = lzw_res.code_dict
        self.code_intervals = lzw_res.encoded_insert_length
        return res
Example #2
    def Node_QL_v2_run(self, rounds):
        print('Running Node_QLearning v2')
        lzw = LZW()
        # Build the initial Q-table
        state = lzw.compress(self.env.observation)
        _, actions = self.env.get_actions2()
        self.agent.create_state_qtable(state, actions, [0] * len(actions))
        # Training loop
        score = 0
        for _ in range(rounds):
            # List for storing the actions taken
            self.save_action = []
            # Start a new training episode
            self.env.reset(refresh_frame=True)
            # Initialise the observation
            state = lzw.compress(self.env.observation)
            while True:
                actions = self.env.get_feasible_actions()
                # Choose the next position
                action = self.agent.choose_action(state)
                pos = self.env.n2p[action]
                # Take the action and get the observation and reward
                index = actions.index(action)
                # Store the action
                self.save_action.append(index)
                ending, reward = self.env.step(actions[index],
                                               return_reward=True,
                                               refresh_frame=self.switch)
                state_ = lzw.compress(self.env.observation)
                if ending == 'clear':
                    score = self.env.player.hp
                if ending == 'continue':
                    done = False
                    # Check whether a Q-table entry for the next state exists
                    if state_ not in self.agent.q_table:
                        rewards, actions = self.env.get_actions2()
                        if actions:
                            # Create the Q-table entry
                            self.agent.create_state_qtable(
                                state_, actions, rewards)
                        else:
                            # Remove the action from the previous state
                            self.agent.q_table[state].drop(action)
                            ending = 'stop'
                            done = True
                else:
                    done = True
                yield pos, done, _, score, self.save_action
                # Learn from the transition
                self.agent.learn(state, action, reward, state_, done)
                # Carry the state over to the next iteration
                state = state_
                # End the episode once a terminal state is reached
                if done:
                    break
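
Both RL runners in this listing pass the raw environment observation through lzw.compress before using it as a Q-table key, so the growing observation sequence becomes a short, stable lookup string. A minimal sketch of that idea, assuming compress returns a hashable string for equal observations; the plain-dict layout below is only illustrative, since the drop(...) call above suggests the project's agent actually keeps its Q-table in a pandas structure:

    # Illustrative sketch, not the project's agent: LZW-compressed
    # observations used as Q-table keys.
    q_table = {}  # compressed state key -> {action: estimated value}


    def state_key(lzw, observation):
        # Assumption: lzw.compress maps equal observations to the same
        # (much shorter) hashable string.
        return lzw.compress(observation)


    def create_state_qtable(key, actions, initial_values):
        # One row per compressed state, one entry per feasible action.
        q_table[key] = dict(zip(actions, initial_values))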
Example #3
    def Node_Sarsa_run(self, rounds):
        print('Running Node_Sarsa')
        pos = None
        self.save_action = []
        lzw = LZW()
        # Build the initial state
        state = lzw.compress(self.env.observation)
        self.agent.create_state_qtable(state, self.env.get_actions())
        # Training loop
        score = 0
        for _ in range(rounds):
            # Start a new training episode
            self.env.reset(refresh_frame=True)
            # Choose an action based on the observation
            state = lzw.compress(self.env.observation)
            action = self.agent.choose_action(state)
            while True:
                # Take the action and get the observation and reward
                ending, reward = self.env.step(action,
                                               return_reward=True,
                                               refresh_frame=self.switch)
                state_ = lzw.compress(self.env.observation)
                if ending == 'clear':
                    score = self.env.player.hp
                    # Save the path that cleared the level
                    save_observation = self.env.observation.copy()
                    self.save_action_ = []
                    self.env.reset()
                    for action in save_observation[len(self.env.observation):]:
                        self.save_action_.append(
                            self.env.get_feasible_actions().index(action))
                        self.env.step(action)
                    self.save_action = self.save_action_.copy()
                if ending == 'continue':
                    done = False
                    # Check whether a Q-table entry for the next state exists
                    if state_ not in self.agent.q_table:
                        self.agent.create_state_qtable(state_,
                                                       self.env.get_actions())
                    # Choose the next action based on the next observation
                    action_ = self.agent.choose_action(state_)
                    pos = self.env.n2p[action_]
                else:
                    done = True
                    action_ = None
                yield pos, done, _, score, self.save_action
                # Learn from the transition
                self.agent.learn(state, action, reward, state_, action_, done)
                # Update the state and action
                state = state_
                action = action_
                # End the episode once a terminal state is reached
                if done:
                    break
Example #4
def to_string(lz: LZW):
    """Helper function used to expand a LZW into a string"""
    sio = io.StringIO()
    for c in lz.expand():
        sio.write(c)

    return sio.getvalue()
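
Assuming expand() yields the decoded output one character at a time, as the tests below rely on, the same helper could also be written with str.join; for this test suite the two are equivalent, and the StringIO version above simply streams the characters as they arrive:

def to_string_join(lz: LZW) -> str:
    # Equivalent sketch: concatenate the characters yielded by expand().
    return ''.join(lz.expand())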
Example #5
    def QLfD_run(self, rounds):
        print('Running QLfD')
        lzw = LZW()
        state = lzw.compress(self.env.observation)
        actions = self.env.get_feasible_actions()
        self.agent.create_state_qtable(state, actions, [0] * len(actions))
        score = 0
        for episode in range(rounds):
            self.env.reset(refresh_frame=True)
            save_action = []
            state = lzw.compress(self.env.observation)
            while True:
                actions = self.env.get_feasible_actions()
                action = self.agent.choose_action(state)
                pos = self.env.n2p[action]
                best_action, weights = self.agent.predict(actions)
                index = actions.index(action)
                # Save the play path
                save_action.append(index)
                ending, _ = self.env.step(actions[index],
                                          return_reward=True,
                                          refresh_frame=self.model_switch)
                state_ = lzw.compress(self.env.observation)
                if ending == 'clear':
                    score = self.env.player.hp
                if ending == 'continue':
                    done = False
                    if state_ not in self.agent.q_table:
                        rewards, actions = self.env.get_actions2()
                        if actions:
                            # Create the Q-table entry
                            self.agent.create_state_qtable(
                                state_, actions, rewards)
                        else:
                            # Remove the action from the previous state
                            self.agent.q_table[state].drop(action)
                            ending = 'stop'
                            done = True
                else:
                    done = True
                yield pos, done, episode, score, save_action
                self.agent.learn(state, action, best_action, state_, done)
                state = state_

                if done:
                    break
Example #6
    def test_lzw_char_string(self):
        """LZWTest the pattern of a single character followed by a string consisting
        of that character.
        """
        lzw = LZW(
            [0x41, 0x100, 0x42, 0x102, 0x43, 0x104, 0x44, 0x106, 0x45, 0x108])

        self.assertEqual("AAABBBCCCDDDEEE", to_string(lzw))
Example #7
    def test_lzw_mixed_case0(self):
        """LZWTest a codewords which represent both characters and strings."""
        lzw = LZW([
            0x41, 0x42, 0x43, 0x44, 0x100, 0x104, 0x101, 0x44, 0x101, 0x5A,
            0x5A
        ])

        self.assertEqual("ABCDABABABCDBCZZ", to_string(lzw))
Example #8
    def test_lzw_growing(self):
        """LZWTest consecutive cases of the corner case.
        The corner case is: Whilst decoding, the codeword in the dictionary does not exist.

        This is when the current codeword for instance 'A' is the next codeword 'AA'.
        """
        lzw = LZW([0x41, 0x100, 0x101, 0x102, 0x103])

        self.assertEqual("AAAAAAAAAAAAAAA", to_string(lzw))
Example #9
    def test_lzw_single(self):
        """LZWTest the basic simple case where each codeword is a single character
        and there are no strings.
        """
        lzw = LZW([
            0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x4B,
            0x4C, 0x4D, 0x4E, 0x4F, 0x50
        ])

        self.assertEqual("ABCDEFGHIJKLMNOP", to_string(lzw))
Example #10
    def test_lzw_reset0(self):
        """LZWTest whether or not resetting the table works as expected.

        The expected behaviour is that strings put in the string table
        will not be used. This is due to the table being reset.
        """
        lzw = LZW([
            0x41, 0x42, 0x43, 0x100, 0x102, 0x42, 0x43, 0x41, 0x42, 0x43, 0x41,
            0x100
        ],
                  table_size=260)

        self.assertEqual("ABCABCABCABCABC", to_string(lzw))
Example #11
    def print_score(self, score: List[int], episode: List[int], titel: str):
        plt.title(titel)
        plt.plot(episode, score)
        plt.ylabel('score')
        plt.xlabel('episode')
        plt.show()

if __name__ == '__main__':
    from environment import Mota
    env = Mota()
    env.build_env('standard_map')
    env.create_nodes()
    agent = imitation_form_QLearning(env)
    lzw = LZW()

    state = lzw.compress(env.observation)

    actions = env.get_feasible_actions()

    agent.create_state_qtable(state, actions, [0] * len(actions))

    best_score = 0
    score = 0
    rounds = 1000
    best_score_list = []
    time_list = []
    for episode in range(rounds):
        print('Running QLfD')
        env.reset()
Example #12
                coding = OmegaCoding
        elif opt == "--mode":
            if arg == "encoding":
                mode = "e"
            elif arg == "decoding":
                mode = "d"
            else:
                print("invalid --mode")
                exit(usage_help)

    if mode == "e":
        encoded = list(
            map(
                # offsetting the numbers by one because of universal coding limitations
                lambda x: x + 1,
                LZW.encode(input_file)))

        coding.encode(encoded, output_file)

        # print the stats
        print("encoded number list entropy:",
              Entropy.encoded_file_entropy(encoded))
        print("original file entropy      :",
              Entropy.original_file_entropy(input_file))

        original_size = os.path.getsize(input_file)
        encoded_size = os.path.getsize(output_file)
        print("original file size         :", original_size)
        print("encoded file size          :", encoded_size)
        print("compression rate           :", original_size / encoded_size)
Example #13
    def test_lzw_alternate_pattern(self):
        """LZWTest alternating pattern between char and string starting with a double char
        and ending with a char."""
        lzw = LZW([0x41, 0x42, 0x100, 0x43, 0x102, 0x44, 0x104, 0x45])

        self.assertEqual("ABABCABCDABCDE", to_string(lzw))
Example #14
from argparse import ArgumentParser
from lzw import LZW


parser = ArgumentParser(description="Implementation of the LZW lossless dictionary \
        compression algorithm, written in Python 3.6 as a project for the \
        Podstawy Kompresji Danych (Data Compression Fundamentals) lab course.\nOnly one mode of operation may be used at a time.")
parser.add_argument('filename', metavar="FILE", type=str, help="name of the file to compress / decompress")
parser.add_argument('bits', metavar="BITS", type=int, help="number of bits used to set the maximum dictionary size (values below 8 are not recommended)")
parser.add_argument('-c', dest="compress", action="store_true", help="Operating mode - compression")
parser.add_argument('-d', dest="decompress", action="store_true", help="Operating mode - decompression")

args = parser.parse_args()
print(args)
if args.decompress and args.compress:
    raise ValueError("More than one operating mode was selected")

algorithm = LZW(max_table_size=args.bits)
if args.compress:
    algorithm.compress(args.filename)
elif args.decompress:
    algorithm.decompress(args.filename)
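
Since -c and -d are meant to be mutually exclusive, the manual check above could also be delegated to argparse itself. A hedged variant of the same interface (same flags and dest names, with the conflict handling moved into the parser):

from argparse import ArgumentParser

parser = ArgumentParser(description="LZW compressor / decompressor (sketch)")
parser.add_argument('filename', metavar="FILE", type=str)
parser.add_argument('bits', metavar="BITS", type=int)
# argparse rejects '-c -d' on its own when the flags share an exclusive group.
mode = parser.add_mutually_exclusive_group(required=True)
mode.add_argument('-c', dest="compress", action="store_true", help="compress FILE")
mode.add_argument('-d', dest="decompress", action="store_true", help="decompress FILE")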


Example #15
    def test_lzw_encode_text(self):
        txt = "beep boop beer!"
        code = LZW(txt)
        self.assertEqual(code.encode_text(), "0110")