Example no. 1
def main():
    run_test(TextManiTest,
             "\n=== Running test for the text manipulation functions ===\n")
    run_test(RNNTest,
             "\n=== Running test for the RNN model ===\n")
    run_test(GenerateFunctionsTest,
             "\n=== Running test for the generate functions ===\n")
    run_test(TweetGeneratorTest,
             "\n=== Running test for tweet generation ===\n")
    if user_has_key:
        run_test(BotTest,
                 "\n=== Running test for the Twitter Bot ===\n")
    else:
        print("The test for the Twitter Bot was not executed")
Example no. 2
def setup():
    global server, driver
    if not have_selenium:
        raise SkipTest("Tests require Selenium")
    default_setup()
    for step in setup_steps:
        if callable(step):
            step()
        else:
            print(step[0])  # description
            run_test(*step[1:])

    server = sarge.Command("smtweb -p 8765 --no-browser",
                           cwd=utils.working_dir,
                           stdout=sarge.Capture(),
                           stderr=sarge.Capture())
    server.run(async_=True)  # sarge renamed 'async' to 'async_' ('async' is reserved in Python 3.7+)
    driver = webdriver.Firefox()
Example no. 3
  %add_f2 = fadd double %add_f, %add_f1
  ret double %add_f2
}

Kaleidoscope >> parsed a function definition.

-- parsed result
FunctionAST{PrototypeAST{f4(z)}BinaryExprAST(BinaryExprAST(BinaryExprAST(NumExprAST(1) + NumExprAST(2)) + VarExprAST(z)) * BinaryExprAST(VarExprAST(z) + BinaryExprAST(NumExprAST(1) + NumExprAST(2))))}

-- generated LLVM IR

define double @f4(double %z) {
entry:
  %add_f = fadd double 3.000000e+00, %z
  %add_f1 = fadd double %z, 3.000000e+00
  %mul_f = fmul double %add_f, %add_f1
  ret double %mul_f
}

Kaleidoscope >> EOF\
''')

if __name__ == "__main__":
    args = [
        f'{env.TARGET_EXECUTABLE_PATH}',  # executable name
        f'{env.TESTS_DATA_DIR}/test_input.txt',  # input data path
        '-debug'  # debug_mode on
    ]
    cmd = ' '.join(args)
    utils.run_test('test_parse_default', cmd, expect)
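
The Kaleidoscope examples hand utils.run_test a command line and an expected transcript; a hedged sketch of such a golden-output helper, assuming it merely diffs captured stdout against expect:

# Hypothetical sketch of a golden-output helper in the spirit of the
# utils.run_test call above: run the command, capture stdout, and
# compare it against the expected transcript (requires Python 3.7+).
import subprocess

def run_test(name, cmd, expect):
    out = subprocess.run(cmd, shell=True, capture_output=True,
                         text=True).stdout
    print('{}: {}'.format(name, 'OK' if out == expect else 'FAILED'))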
Example no. 4
def test_arp_p0(init):
    utils.run_test(test_arp, init.test_dir, init.benchmark_dir, '-m p0')
Example no. 5
def test_arp_i(init):
    utils.run_test(test_arp, init.test_dir, init.benchmark_dir, '-m i')
Example no. 6
def test_all():
    """Test generator for Nose."""
    if not have_psycopg2:
        raise SkipTest("Tests require psycopg2")
    if not have_docker:
        raise SkipTest("Tests require docker")
    for step in test_steps:
        if callable(step):
            step()
        else:
            run_test.description = step[0]
            yield tuple([run_test] + list(step[1:]))


if __name__ == '__main__':
    # Run the tests without using Nose.
    setup()
    for step in test_steps:
        if callable(step):
            step()
        else:
            print(step[0])  # description
            run_test(*step[1:])
    response = input(
        "Do you want to delete the temporary directory (default: yes)? ")
    if response not in ["n", "N", "no", "No"]:
        teardown()
    else:
        print("Temporary directory %s not removed" % utils.temporary_dir)
Example no. 7
def test_hub_p0(init):
    utils.run_test(test_hub, init.test_dir, init.benchmark_dir, '-m p0')
Example no. 8

def test_all():
    """Test generator for Nose."""
    if not have_psycopg2:
        raise SkipTest("Tests require psycopg2")
    if not have_docker:
        raise SkipTest("Tests require docker")
    for step in test_steps:
        if callable(step):
            step()
        else:
            run_test.description = step[0]
            yield tuple([run_test] + list(step[1:]))


if __name__ == '__main__':
    # Run the tests without using Nose.
    setup()
    for step in test_steps:
        if callable(step):
            step()
        else:
            print(step[0])  # description
            run_test(*step[1:])
    response = input(
        "Do you want to delete the temporary directory (default: yes)? ")
    if response not in ["n", "N", "no", "No"]:
        teardown()
    else:
        print("Temporary directory %s not removed" % utils.temporary_dir)
Example no. 9
                 for tweet in tweet_list]
        result = all([triple[0] for triple in debug])
        self.assertTrue(result, msg="\nAll tweets\n {}".format(debug))

    def test_tweet_hashtags_content(self):
        """
        Test that all the generated tweets contain the
        hashtags from the hashtag list.
        """
        tg = TweetGenerator(text_path=TweetGeneratorTest.data_path,
                            config=TweetGeneratorTest.config,
                            train=True,
                            debug=True)
        hashtags = ["#AI", "#tensorflow"]
        tweet_list = tg.generate_tweet_list(50, "i am", hashtag_list=hashtags)
        result = True
        debug = "NoProblemo"
        for tweet in tweet_list:
            condition1 = tweet.find("#AI") != -1
            condition2 = tweet.find("#tensorflow") != -1
            if not (condition1 and condition2):
                debug = tweet
                result = False
                break
        self.assertTrue(result, msg="\nProblematic tweet = {}".format(debug))


if __name__ == "__main__":
    run_test(TweetGeneratorTest,
             "\n=== Running test for tweet generation ===\n")
Example no. 10
def test_mac_learner_p0(init):
    utils.run_test(test_mac_learner, init.test_dir, init.benchmark_dir,
                   '-m p0')
Example no. 11
                         TextManiTest.first,
                         msg="result = {}".format(result))

    def test_eos(self):
        """
        Test that the read_line_eos function
        appends the <eos> token at the end of each line.
        """
        vocab = Vocab()
        path = TextManiTest.text_path_toy
        sentence = []
        count = 0
        vocab.read_words(read_line_eos(path))
        encode = [vocab.encode(word) for word in read_line_eos(path)]
        for word in [vocab.decode(index) for index in encode]:
            if word != "<eos>":
                if count == 13:
                    sentence.append(word)
            else:
                count += 1
        result = ' '.join(sentence)
        self.assertEqual(result,
                         TextManiTest.last,
                         msg="result = {}".format(result))


if __name__ == "__main__":
    run_test(TextManiTest,
             "\n=== Running test for the text manipulation functions ===\n")
        """
        model = GenerateFunctionsTest.model
        gen_config = GenerateFunctionsTest.gen_config
        toy_train = GenerateFunctionsTest.toy_train
        with tf.Session(graph=model.graph) as sess:
            tf.global_variables_initializer().run()
            run_epoch(model, sess, toy_train, model.train_op)
            model.saver.save(sess, model.save_path)
        gen_model = RNNLanguageModel(gen_config, GenerateFunctionsTest.data)
        with tf.Session(graph=gen_model.graph) as sess:
            gen_model.saver.restore(sess, gen_model.save_path)
            result = ' '.join(
                generate_text(sess,
                              gen_model,
                              gen_config,
                              "i am",
                              stop_tokens=['<eos>']))
        self.assertEqual(type(result),
                         str,
                         msg="not str\n type(result) ={}".format(type(result)))
        self.assertTrue(len(result) > 1,
                        msg="len(result) = {}".format(len(result)))
        test1 = result[-5:] == '<eos>'
        test2 = len(result.split()) == 102
        self.assertTrue(test1 or test2, msg="result = {}".format(result))


if __name__ == "__main__":
    run_test(GenerateFunctionsTest,
             "\n=== Running test for the generate functions ===\n")
Example no. 13
def test_arp_r0(init):
    utils.run_test(test_arp, init.test_dir, init.benchmark_dir, '-m r0')
Example no. 14
    @classmethod
    def tearDown(cls):
        check_path = os.path.join(currentdir, "checkpoints")
        logs_path = os.path.join(currentdir, "logs")
        if os.path.exists(check_path):
            shutil.rmtree(check_path)
        if os.path.exists(logs_path):
            shutil.rmtree(logs_path)

    def test_optimization(self):
        """
        Test that the perplexity on the validation data
        decreases after training.
        """
        model = RNNTest.model
        toy_valid = RNNTest.toy_valid
        toy_train = RNNTest.toy_train
        with tf.Session(graph=model.graph) as sess:
            tf.global_variables_initializer().run()
            before_training = run_epoch(model, sess, toy_valid)
            run_epoch(model, sess, toy_train, model.train_op)
            after_training = run_epoch(model, sess, toy_valid)
        self.assertTrue(before_training > after_training,
                        msg="before = {0}\nafter = {1}".format(
                            before_training, after_training))


if __name__ == "__main__":
    run_test(RNNTest, "\n=== Running test for the RNN model ===\n")
Example no. 15
def test_hub_i(init):
    utils.run_test(test_hub, init.test_dir, init.benchmark_dir, '-m i')
Example no. 16
def get_result(dataset_name,
               target_model,
               task,
               kargs,
               sampled_dir='',
               debug=debug,
               cache=cache):
    rs = utils.RandomState()
    rs.save_state()
    rs.set_seed(0)
    embedding_filename = utils.get_names(target_model, **kargs)
    if task == 'classification':
        cf = os.path.abspath(
            os.path.join('result/{}'.format(dataset_name), sampled_dir, 'cf',
                         embedding_filename))
    elif task == 'link_predict':
        cf = os.path.abspath(
            os.path.join('result/{}'.format(dataset_name), sampled_dir, 'lp',
                         embedding_filename))
    embedding_filename = os.path.abspath(
        os.path.join('embeddings/{}'.format(dataset_name), sampled_dir,
                     embedding_filename))
    dataset_filename = os.path.abspath(
        os.path.join('data/{}'.format(dataset_name), sampled_dir,
                     'graph.edgelist'))
    if target_model != 'gcn':
        if (not cache) or (not os.path.exists(embedding_filename)) or (
                os.path.getmtime(embedding_filename) <
                os.path.getmtime(dataset_filename)):
            utils.run_target_model(target_model,
                                   dataset_filename,
                                   os.path.dirname(embedding_filename),
                                   embedding_test_dir=embedding_test_dir,
                                   debug=debug,
                                   **kargs)
        if (not cache) or (not os.path.exists(cf)) or (
                os.path.getmtime(cf) < os.path.getmtime(embedding_filename)):
            if task == 'classification':
                labels = os.path.abspath(
                    os.path.join(os.path.dirname(dataset_filename),
                                 'label.txt'))
            elif task == 'link_predict':
                labels = os.path.abspath(
                    os.path.join(os.path.dirname(dataset_filename)))
            utils.run_test(task,
                           dataset_name, [embedding_filename],
                           labels,
                           cf,
                           embedding_test_dir=embedding_test_dir)
    else:
        if (not cache) or (not os.path.exists(cf)):
            data_path = os.path.abspath(
                os.path.join('data/{}'.format(dataset_name)))
            with utils.cd(
                    os.path.join(embedding_test_dir, 'src/baseline/gcn/gcn')):
                cmd = ('python3 main.py'
                       ' --epochs {} --hidden1 {} --learning_rate {}'
                       ' --output_filename {} --debug {}'
                       ' --dataset {} --input_dir {}').format(
                           kargs['epochs'], kargs['hidden1'],
                           kargs['learning_rate'], cf, debug,
                           dataset_name, data_path)
                if debug:
                    print(cmd)
                else:
                    cmd += ' > /dev/null 2>&1'
                os.system(cmd)
    rs.load_state()
    res = np.loadtxt(cf, dtype=float)
    if len(res.shape) != 0:
        res = res[0]
    return res
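
The cache logic in get_result rebuilds an artifact whenever it is missing or older than its input; the same mtime check, distilled into a standalone sketch:

import os

def is_stale(artifact, source):
    # Rebuild when the artifact is missing or older than its input,
    # mirroring the cache checks in get_result above.
    return (not os.path.exists(artifact)
            or os.path.getmtime(artifact) < os.path.getmtime(source))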
Example no. 17
def test_hub_r0(init):
    utils.run_test(test_hub, init.test_dir, init.benchmark_dir, '-m r0')
Example no. 18
def test_hub_i(init):
    utils.run_test(test_hub, init.test_dir, init.benchmark_dir, '-m i')
Example no. 19
define double @f4(double %z) {
entry:
  %add_f = fadd double 3.000000e+00, %z
  %add_f1 = fadd double %z, 3.000000e+00
  %mul_f = fmul double %add_f, %add_f1
  ret double %mul_f
}

Kaleidoscope >> EOF\
''')

if __name__ == "__main__":
    args0 = [
        f'{env.TARGET_EXECUTABLE_PATH}',
        f'{env.TESTS_DATA_DIR}/test_input.txt',
        '-debug',
        '-pass',
    ]
    cmd0 = ' '.join(args0)
    utils.run_test('test_parse_pass_enable 1', cmd0, expect)

    args1 = [
        f'{env.TARGET_EXECUTABLE_PATH}',
        f'{env.TESTS_DATA_DIR}/test_input.txt',
        '-pass',  # flipped
        '-debug',
    ]
    cmd1 = ' '.join(args1)
    utils.run_test('test_parse_pass_enable 2', cmd1, expect)
Example no. 20
from actions_ui.common import PopupMsgTempaltes
from actions_ui.shopping_list import sign_up_user
from actions_ui.page_objects.shipping_list_page import ShoppingListPage


def test_signup_user(func_browser):
    shopping_list = ShoppingListPage(func_browser, True)
    username = sign_up_user(shopping_list)
    # TODO: could not solve this problem for a remote browser :(
    # shopping_list.wait_popup_hidden(PopupMsgTempaltes.SIGN_UP)
    active_username = shopping_list.get_active_username()
    assert username == active_username


if __name__ == '__main__':
    from utils import run_test
    run_test(__file__)
Example no. 21
def test_arp_i(init):
    utils.run_test(test_arp, init.test_dir, init.benchmark_dir, '-m i')
Example no. 22
def test_mac_learner_p0(init):
    utils.run_test(test_mac_learner, init.test_dir, init.benchmark_dir, '-m p0')
Example no. 23
    @classmethod
    def tearDown(cls):
        check_path = os.path.join(currentdir, "checkpoints")
        logs_path = os.path.join(currentdir, "twitter_log")
        if os.path.exists(check_path):
            shutil.rmtree(check_path)
        if os.path.exists(logs_path):
            shutil.rmtree(logs_path)

    def test_log(self):
        """
        Every time we create a bot, it saves the
        Twitter status in a CSV file. This function tests
        whether it saves the correct information.
        """
        Bot(corpus=BotTest.data_path)
        self.assertTrue(os.path.exists(BotTest.csv_path),
                        msg="Not writing csv for the first time")
        Bot(corpus=BotTest.data_path)
        df = pd.read_csv(BotTest.csv_path)
        self.assertEqual(df.shape, (2, 4), msg="Wrong Shape\n {}".format(df))


if __name__ == "__main__":
    key_path = os.path.join(parentdir, "agent", "key.py")
    if os.path.exists(key_path):
        run_test(BotTest, "\n=== Running test for the Twitter Bot ===\n")
    else:
        print("No file in the path \n {}".format(key_path))