def env_setup(self):
    super(TestSSHConfig, self).env_setup()
    env.use_ssh_config = True
    env.ssh_config_path = support("ssh_config")
    # Undo the changes FabricTest makes to env for server support
    env.user = env.local_user
    env.port = env.default_port
def iterateDataset(values):
    uniqueItems = getUniqueItems(values)
    sets = getSets(uniqueItems)
    confidenceList = []
    supportList = []
    for item in uniqueItems:
        temp = support(item, values)
        if temp >= minSupp:
            supportList.append({"support": temp, "item": item})
    dataSetStats(values)
    print("\nSupport\n")
    if len(supportList) > 0:
        for item in supportList:
            print(f'Support: {item["support"]} \tItem Name: {item["item"]}')
    else:
        print("No items match minimum support criteria")
    for item in sets:
        temp = confidence(item, values)
        if temp >= minConf:
            confidenceList.append({"conf": temp, "item": item})
    print("\nConfidence\n")
    if len(confidenceList) > 0:
        for item in confidenceList:
            print(f'Confidence: {item["conf"]}\t Itemset: {item["item"]}')
    else:
        print("No items match minimum confidence criteria")
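# The snippet above leans on helpers that are not shown (getUniqueItems, getSets,
# support, confidence, dataSetStats) and on module-level minSupp/minConf thresholds.
# A minimal sketch of plausible implementations, assuming `values` is a list of
# transactions (each a list/set of item names) and that candidate itemsets are
# 2-item pairs; the names, thresholds, and logic below are assumptions, not the
# original code.
from itertools import combinations

minSupp = 0.3  # assumed minimum-support threshold
minConf = 0.6  # assumed minimum-confidence threshold

def getUniqueItems(values):
    # Every distinct item that appears in any transaction.
    return sorted({item for transaction in values for item in transaction})

def getSets(uniqueItems):
    # Candidate 2-item sets, e.g. ("bread", "milk").
    return list(combinations(uniqueItems, 2))

def support(itemset, values):
    # Fraction of transactions that contain every item in the itemset
    # (accepts either a single item name or an iterable of names).
    items = {itemset} if isinstance(itemset, str) else set(itemset)
    return sum(1 for transaction in values if items <= set(transaction)) / len(values)

def confidence(pair, values):
    # confidence(A -> B) = support(A and B) / support(A).
    antecedent, _ = pair
    denom = support(antecedent, values)
    return support(pair, values) / denom if denom else 0.0

def dataSetStats(values):
    print(f"{len(values)} transactions, {len(getUniqueItems(values))} unique items")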
def test_just_ssh_config(self):
    """
    No env.key_filename + valid ssh_config = ssh value
    """
    with settings(use_ssh_config=True, ssh_config_path=support("ssh_config")):
        for val in ["", []]:
            with settings(key_filename=val):
                eq_(key_filenames(), ["foobar.pub"])
def test_both(self):
    """
    Both env.key_filename + valid ssh_config = both show up w/ env var first
    """
    with settings(use_ssh_config=True, ssh_config_path=support("ssh_config")):
        with settings(key_filename="bizbaz.pub"):
            eq_(key_filenames(), ["bizbaz.pub", "foobar.pub"])
        with settings(key_filename=["bizbaz.pub", "whatever.pub"]):
            expected = ["bizbaz.pub", "whatever.pub", "foobar.pub"]
            eq_(key_filenames(), expected)
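# The two tests above pin down the merge order key_filenames() must produce: values
# from env.key_filename (normalised to a list, with empty values dropped) come first,
# and the IdentityFile from the matching ssh_config host follows. A standalone sketch
# of that ordering (merged_key_filenames is a hypothetical helper for illustration,
# not Fabric's implementation):
def merged_key_filenames(env_key_filename, ssh_config_identityfiles):
    if not env_key_filename:
        env_keys = []
    elif isinstance(env_key_filename, str):
        env_keys = [env_key_filename]
    else:
        env_keys = list(env_key_filename)
    return env_keys + list(ssh_config_identityfiles)

assert merged_key_filenames("", ["foobar.pub"]) == ["foobar.pub"]
assert merged_key_filenames("bizbaz.pub", ["foobar.pub"]) == ["bizbaz.pub", "foobar.pub"]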
def test_real_connection(self):
    """
    Test-server connection using ssh_config values
    """
    with settings(
        hide('everything'),
        ssh_config_path=support("testserver_ssh_config"),
        host_string='testserver',
    ):
        ok_(run("ls /simple").succeeded)
def main_task():
    eq_(env.port, '2200')
    execute(sub_task_one, hosts=['localhost:2201'])
    with settings(port='2202'):
        execute(sub_task_two, hosts=['localhost'])
    with settings(use_ssh_config=True, ssh_config_path=support("ssh_config")):
        run = execute(sub_task_three, hosts='myhost')
    return run['myhost']
def main_task():
    eq_(env.port, "2200")
    execute(expect_host_string_port, hosts=["localhost:2201"])
    with settings(port="2202"):
        execute(expect_env_port, hosts=["localhost"])
    with settings(use_ssh_config=True, ssh_config_path=support("ssh_config")):
        run = execute(expect_per_host_config_port, hosts="myhost")
    return run["myhost"]
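# The main_task variants above dispatch to sub-tasks that are not shown. A plausible
# sketch, assuming each helper simply checks the port Fabric resolved for it; the port
# that the ssh_config entry for 'myhost' supplies is not visible here, so the last
# helper just returns whatever was resolved (main_task's caller reads it via run['myhost']).
def expect_host_string_port():
    # Port given in the host string ('localhost:2201') should win.
    eq_(env.port, '2201')

def expect_env_port():
    # Port given via settings(port='2202') should win.
    eq_(env.port, '2202')

def expect_per_host_config_port():
    # Port comes from the per-host ssh_config entry for 'myhost'.
    return env.port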
username="******", password="******") # run_bot(reddit_instance, input_file_weight_pairs) generated_comment = generate_comment(input_file_weight_pairs, state_size) print("generated comment: {}".format(generated_comment)) result = utils.ngram_similarity_absolute(generated_comment, input_files[0], state_size) print( "the most ngrams that the generated comment shares with a sentence in the corpus is: {}" .format(result[0])) print("that sentence in the corpus is: {}".format(result[1])) result = utils.ngram_similarity_percentage(generated_comment, input_files[0], state_size) print( "the highest percentage of ngrams that a sentence in the corpus has in common with the generated comment is: {}" .format(result[0])) print("that sentence in the corpus is: {}".format(result[1])) result = utils.support(generated_comment, input_files[0], state_size) print( "the sentences in the corpus that provide support for the ngrams in the generated comment are: {}" .format(result)) result = utils.ngram_frequencies(generated_comment, input_files[0], state_size) print("the frequencies of the ngrams in the generated comment are: {}". format(result))