Example #1
    def __init__(self, embed_matrix):
        self.graph = tf.Graph()
        with self.graph.as_default():
            # Input
            self.input_matrix = tf.constant(embed_matrix, name="embed_matrix")
            self.word_ids = tf.placeholder(tf.int32,
                                           shape=[None],
                                           name="word_ids")
            # Distributed representations to be compressed (batch_size, embed_size)
            self.input_embeds = tf.nn.embedding_lookup(self.input_matrix,
                                                       self.word_ids,
                                                       name="input_embeds")

            # Codebooks
            self.codebooks = tf.get_variable("codebook",
                                             [hp.M * hp.K, hp.embed_size])

            # Encoding
            self.logits = encode(self.input_embeds)  # (batch_size, M, K)
            # Discretization
            self.D = gumbel_softmax(self.logits,
                                    hp.tau_value)  # (batch_size,M,K)
            self.gumbel_output = tf.reshape(
                self.D, [-1, hp.M * hp.K])  # (batch_size, M * K)
            self.maxp = tf.reduce_mean(tf.reduce_max(self.D, axis=2))

            # Decoding
            self.output_embeds = decode(
                self.gumbel_output,
                self.codebooks)  # (batch_size, M*K) * (M*K, embed_size)

            # Loss
            self.loss = tf.reduce_mean(0.5 * tf.reduce_sum(
                (self.output_embeds - self.input_embeds)**2, axis=1),
                                       name="loss")

            # Optimization
            self.train_vars = tf.trainable_variables()
            self.grads, self.global_norm = tf.clip_by_global_norm(
                tf.gradients(self.loss, self.train_vars), clip_norm=0.001)
            self.global_norm = tf.identity(self.global_norm,
                                           name="global_norm")
            self.optimizer = tf.train.AdamOptimizer(0.0001)
            self.train_op = self.optimizer.apply_gradients(zip(
                self.grads, self.train_vars),
                                                           name="train_op")
Example #2
    modules.yaz("\nKullanabileceğiniz komutlar şunlardır:")
    modules.yaz("""
    cikis, c : yazilimi durdurmak icin kullanilir.
    decode, d: sifreli yaziyi cozmek icin kullanılır. (Aktif degil)
    encode, e: sifrelemek icin kullanılır.
    yardim, y: yardim yazisini gosterir.
    """)


yardim()

while True:  # Main loop
    komut = input("\n>>> ")

    if komut in ["cikis", "c"]:
        break

    elif komut in ["encode", "e"]:
        text = input("Encode edilecek yazi: ")
        modules.yaz(modules.encode(text))

    elif komut in ["decode", "d"]:
        text = input("Decode edilecek yazi: ")
        modules.yaz(modules.decode(text))

    elif komut in ["yardim", "y"]:
        yardim()

    else:
        modules.yaz("\nVar olan bir komutu yazmadınız.\n")
Example #5
    def test_DecoderString_CorrectDecoding(self):
        y = np.array([[1, 0], [0, 1], [0, 1]])
        cdict = {'cat': np.array([1, 0]), 'dog': np.array([0, 1])}
        y1 = decode(y, cdict)
        y2 = ['cat', 'dog', 'dog']
        self.assertEqual(y1, y2)
Example #6
    def test_Decoder1dim_CorrectDecoding(self):
        y = np.array([[1, 0], [0, 1], [0, 1]])
        cdict = {1: np.array([1, 0]), 2: np.array([0, 1])}
        y1 = decode(y, cdict)
        y2 = [1, 2, 2]
        np.testing.assert_array_almost_equal(y1, y2)
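
Examples #5 and #6 exercise the same decode(y, cdict) function on one-hot rows. The module under test is not shown; one implementation consistent with both tests maps each row of y to the dictionary key whose code vector matches it:

import numpy as np

def decode(y, cdict):
    # For each one-hot row, emit the key whose code vector equals that row.
    labels = []
    for row in y:
        for key, code in cdict.items():
            if np.array_equal(row, code):
                labels.append(key)
                break
    return labels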