Example #1
    def test_get_sign(self):
        dim = 100
        act = 10
        gen = Generator(dim, act)

        signs = [str(i) for i in range(10)]
        sign_index = TrieSignIndex(gen, vocabulary=signs)

        for s in signs:
            self.assertTrue(sign_index.contains(s))
            sign_id = sign_index.get_id(s)
            self.assertTrue(sign_index.contains_id(sign_id))
            s2 = sign_index.get_sign(sign_id)
            self.assertEqual(s, s2)

        # get_sign with an id that doesn't exist should return None
        missing_id = 86
        s = sign_index.get_sign(missing_id)
        self.assertIsNone(s)
        self.assertFalse(sign_index.contains_id(missing_id))

        self.assertEqual(len(sign_index.sign_trie), len(signs))

        self.assertTrue(sign_index.contains_id(len(signs)-1))
        self.assertFalse(sign_index.contains_id(len(signs)))
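
The test above exercises the core TrieSignIndex contract: every sign maps to an id and back, and unknown ids yield None. A minimal standalone sketch of that round trip follows; the import paths are assumptions (adjust them to wherever Generator and TrieSignIndex live in your project), while the calls themselves are exactly the ones the test uses.

# Sketch of the sign <-> id round trip tested above.
# NOTE: the module paths below are assumptions, not part of the original example.
from deepsign.rp.ri import Generator         # assumed path
from deepsign.rp.index import TrieSignIndex  # assumed path

gen = Generator(100, 10)                     # dim=100, 10 active entries
index = TrieSignIndex(gen, vocabulary=["cat", "dog"])

sign_id = index.get_id("cat")
assert index.get_sign(sign_id) == "cat"      # ids map back to their signs
assert index.get_sign(999) is None           # unknown ids yield None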
Example #2
# top10w, frequencies, trie and vocabulary are defined earlier in the script (not shown)
top10f = list(frequencies[0:10])
top10ids = [trie.get(top10w[i]) for i in range(10)]
top10w_trie = [trie.restore_key(i) for i in top10ids]

print(top10w)
print(top10f)
print(top10w_trie)

ri_gen = Generator(dim=1000, num_active=10)

t0 = time.time()
sign_index = TrieSignIndex(ri_gen, list(vocabulary[:]))
t1 = time.time()
print(t1 - t0)  # time taken to build the index over the full vocabulary

print(top10ids)
top10w_index = [sign_index.get_sign(i) for i in top10ids]
print(top10w_index)

# test loading the top ten words into a fresh index
print("=============================================")
index = TrieSignIndex(generator=ri_gen, vocabulary=top10w)
print(top10w)
top10ids = [index.get_id(w) for w in top10w]
print(top10ids)
freq = TrieSignIndex.map_frequencies(top10w, top10f, index)
top10freq = [freq[i] for i in top10ids]
print(top10freq)

h5v.close()  # h5v is an open file handle from earlier in the script (not shown)
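
For reference, here is a compact sketch of the frequency-mapping step used above. It assumes only the calls the snippet itself makes (map_frequencies returning a structure indexable by sign id); the import paths are assumptions.

# Sketch of TrieSignIndex.map_frequencies: word counts re-keyed by sign id.
# NOTE: the module paths below are assumptions, not part of the original example.
from deepsign.rp.ri import Generator         # assumed path
from deepsign.rp.index import TrieSignIndex  # assumed path

words = ["the", "of", "and"]
counts = [1000, 800, 600]

index = TrieSignIndex(generator=Generator(dim=1000, num_active=10),
                      vocabulary=words)

freq = TrieSignIndex.map_frequencies(words, counts, index)
for w in words:
    print(w, freq[index.get_id(w)])          # frequency looked up by sign id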
Example #3
    def test_nce_nrp(self):
        vocab_size = 1000
        k = 500             # random index dimension (dim)
        s = 8               # active entries per random index (num_active)
        embed_size = 128
        nce_samples = 10
        noise_ratio = 0.1
        use_nce = True

        vocab = [str(i) for i in range(vocab_size)]

        generator = Generator(k, s)
        sign_index = TrieSignIndex(generator,
                                   vocabulary=vocab,
                                   pregen_indexes=True)
        # fetch the random index (RI) vector for every sign in the index
        ris = [
            sign_index.get_ri(sign_index.get_sign(i))
            for i in range(len(sign_index))
        ]
        # ris = [generator.generate() for _ in range(vocab_size)]

        ri_tensor = ris_to_sp_tensor_value(ri_seq=ris,
                                           dim=k,
                                           all_positive=False)

        ri_tensor_input = tx.SparseInput(n_units=k, value=ri_tensor)

        if use_nce:
            label_inputs = tx.SparseInput(k, name="target_random_indices")
        else:
            label_inputs = [
                tx.Input(1, dtype=tf.int64, name="ids"),
                tx.InputParam(dtype=tf.int32,
                              value=vocab_size,
                              name="vocab_size")
            ]

        eval_label_inputs = [
            tx.Input(1, dtype=tf.int64, name="ids_eval"),
            tx.InputParam(dtype=tf.int32, value=vocab_size, name="vocab_size")
        ]

        model = NRP(
            run_inputs=tx.SparseInput(n_units=k, name="random_index_inputs"),
            label_inputs=label_inputs,
            eval_label_input=eval_label_inputs,
            ctx_size=2,
            # vocab_size=vocab_size,
            k_dim=k,
            ri_tensor_input=ri_tensor_input,  # current dictionary state
            embed_dim=embed_size,
            h_dim=128,
            num_h=1,
            h_activation=tx.relu,
            use_dropout=True,
            embed_dropout=True,
            keep_prob=0.70,
            use_nce=use_nce,
            nce_samples=nce_samples,
            nce_noise_amount=noise_ratio,
            noise_input=tx.SparseInput(k, name="noise"))

        tf.summary.histogram("embeddings", model.embeddings.weights)
        for h in model.h_layers:
            tf.summary.histogram("h", h.linear.weights)

        # model.eval_tensors.append(model.train_loss_tensors[0])
        runner = tx.ModelRunner(model)
        runner.set_log_dir("/tmp")
        runner.log_graph()

        options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        # options = None
        runner.set_session(runtime_stats=True, run_options=options)

        # options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)

        # runner.config_optimizer(tf.train.GradientDescentOptimizer(learning_rate=0.005))#,
        # SGD with 0.025

        # lr = tx.InputParam(init_value=0.0002)
        lr = tx.InputParam(value=0.025)
        # runner.config_optimizer(tf.train.AdamOptimizer(learning_rate=lr.tensor, beta1=0.9), params=lr,
        runner.config_optimizer(
            tf.train.GradientDescentOptimizer(learning_rate=lr.tensor),
            optimizer_params=lr,
            global_gradient_op=False,
            # gradient_op=lambda grad: tf.clip_by_global_norm(grad, 10.0)[0])
            gradient_op=lambda grad: tf.clip_by_norm(grad, 1.0))

        data = np.array([[0, 2], [5, 7], [9, 8], [3, 4], [1, 9], [12, 8]])
        labels = np.array([[32], [56], [12], [2], [5], [23]])

        ppl_curve = []
        n = 256
        batch_size = 128

        dataset = np.column_stack((data, labels))
        # print(dataset)
        dataset = views.repeat_it([dataset], n)
        dataset = views.flatten_it(dataset)
        # shuffle with a buffer of 6 elements at a time
        dataset = views.shuffle_it(dataset, 6)
        dataset = views.batch_it(dataset, batch_size)

        # print(np.array(list(dataset)))
        # d = list(views.take_it(1, views.shuffle_it(d, 4)))[0]

        num_batches = n * len(data) // batch_size  # 6 rows repeated n times
        for batch in tqdm(dataset, total=num_batches):
            sample = np.array(batch)

            ctx = sample[:, :-1]
            ctx = ctx.flatten()
            ctx_ris = [sign_index.get_ri(sign_index.get_sign(i)) for i in ctx]
            ctx_ris = ris_to_sp_tensor_value(
                ctx_ris,
                dim=sign_index.feature_dim(),
                all_positive=not sign_index.generator.symmetric)
            lbl_ids = sample[:, -1:]
            lbl = lbl_ids.flatten()

            if use_nce:
                lbl_ris = [
                    sign_index.get_ri(sign_index.get_sign(i)) for i in lbl
                ]
                lbl_ris = ris_to_sp_tensor_value(
                    lbl_ris,
                    dim=sign_index.feature_dim(),
                    all_positive=not sign_index.generator.symmetric)

                noise = generate_noise(k_dim=k,
                                       batch_size=lbl_ris.dense_shape[0] *
                                       nce_samples,
                                       ratio=noise_ratio)
                runner.train(ctx_ris, [lbl_ris, noise],
                             output_loss=True,
                             write_summaries=True)
            else:
                runner.train(model_input_data=ctx_ris,
                             loss_input_data=lbl_ids,
                             output_loss=True,
                             write_summaries=True)

        runner.close_session()
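
The training loop above repeatedly converts integer token ids into the sparse random-index representation the model consumes. A compact sketch of just that conversion step is given below; the import paths are assumptions, while get_ri, get_sign, feature_dim and ris_to_sp_tensor_value are used exactly as in the test.

# Sketch: batch of token ids -> sparse random-index tensor value.
# NOTE: the module paths below are assumptions, not part of the original example.
import numpy as np
from deepsign.rp.ri import Generator                      # assumed path
from deepsign.rp.index import TrieSignIndex               # assumed path
from deepsign.rp.tf_utils import ris_to_sp_tensor_value   # assumed path

vocab = [str(i) for i in range(100)]
index = TrieSignIndex(Generator(500, 8), vocabulary=vocab)

batch_ids = np.array([[0, 2], [5, 7]])                    # (batch, ctx_size)
ctx = batch_ids.flatten()

ris = [index.get_ri(index.get_sign(i)) for i in ctx]      # id -> sign -> RI
sp_value = ris_to_sp_tensor_value(
    ris,
    dim=index.feature_dim(),
    all_positive=not index.generator.symmetric)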