def test_this_code(self):
     self.assertEqual({0: [0, 2, 4, 6], 1: [1, 3, 5, 7]}, group_by(
         lambda x: x % 2, [0, 1, 2, 3, 4, 5, 6, 7]))
     self.assertEqual({'even': [2, 8, 10, 12], 'odd': [1, 3, 5, 9]}, group_by(
         lambda x: 'odd' if x % 2 else 'even', [1, 2, 3, 5, 8, 9, 10, 12]))
     self.assertEqual({0: [0, 3, 6], 1: [1, 4, 7], 2: [2, 5]}, group_by(
         lambda x: x % 3, [0, 1, 2, 3, 4, 5, 6, 7]))
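The tests above (and most of the test methods below) pin down a callable-first contract: group_by(key_func, iterable) returns a dict that maps each result of key_func to the list of items that produced it, in input order. A minimal sketch that would satisfy these assertions; the use of collections.defaultdict is an implementation choice here, not something shown in the examples:

from collections import defaultdict


def group_by(key_func, iterable):
    # Map each key_func(item) result to the items that produced it,
    # keeping items in the order they appear in the input.
    groups = defaultdict(list)
    for item in iterable:
        groups[key_func(item)].append(item)
    return dict(groups)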
Example #2
def output_stats_by_name(all_tuples_filename):

    value_index = 4
    name_index = 0  # 0 = name, 1 = lns, 2 = ns

    # this option removes names for which there is a failed read request

    folder = dirname(all_tuples_filename)

    exclude_failed_reads = True
    if exclude_failed_reads:
        failed_reads_names = select_failed_reads_names(all_tuples_filename)
        write_array(failed_reads_names.keys(), os.path.join(folder, 'failed_reads_names.txt'))
        all_tuples_filename = write_all_tuples_excluding_failed(all_tuples_filename, failed_reads_names)

    outfile1 = os.path.join(folder, 'all_by_name.txt')
    output_tuples1 = group_by(all_tuples_filename, name_index, value_index)
    write_tuple_array(output_tuples1, outfile1, p = True)
    
    outfile2 = os.path.join(folder, 'writes_by_name.txt')
    output_tuples2 = group_by(all_tuples_filename, name_index, value_index, filter = write_filter)
    write_tuple_array(output_tuples2, outfile2, p = True)
    
    outfile3 = os.path.join(folder, 'reads_by_name.txt')
    output_tuples3 = group_by(all_tuples_filename, name_index, value_index, filter = read_filter)
    write_tuple_array(output_tuples3, outfile3, p = True)
    
    filenames = [outfile1, outfile2, outfile3]
    schemes = ['ALL', 'WRITES', 'READS']
    template_file = os.path.join(script_folder, 'template1.gpt')
    
    col_no = 4
    pdf_filename = os.path.join(folder, 'median_by_name.pdf')
    get_cdf_and_plot(filenames, schemes, [col_no]*len(schemes), pdf_filename, folder, template_file)
    
    col_no = 5
    pdf_filename = os.path.join(folder, 'mean_by_name.pdf')
    get_cdf_and_plot(filenames, schemes, [col_no]*len(schemes), pdf_filename, folder, template_file)
    
    # output key stats
    read_median_list = [t[4] for t in output_tuples3]
    read_mean_list = [t[5] for t in output_tuples3]
    write_median_list = [t[4] for t in output_tuples2]
    write_mean_list = [t[5] for t in output_tuples2]

    # delete this.
    #read_median_list2 = []
    #for v in read_median_list:
    #    if v <5000:
    #        read_median_list2.append(v)
    
    kv_tuples = []
    kv_tuples.extend(get_stat_in_tuples(read_median_list, 'read_median_names'))
    kv_tuples.extend(get_stat_in_tuples(read_mean_list, 'read_mean_names')) 
    kv_tuples.extend(get_stat_in_tuples(write_median_list, 'write_median_names'))
    kv_tuples.extend(get_stat_in_tuples(write_mean_list, 'write_mean_names'))
    
    outputfile = os.path.join(folder, 'latency_stats_names.txt')
    write_tuple_array(kv_tuples, outputfile, p = True)
    os.system('cat ' + outputfile)
Example #3
    def test_group_by(self):
        first_result = {0: [0, 2, 4, 6], 1: [1, 3, 5, 7]}
        self.assertEqual(first_result, group_by(lambda x: x % 2, [0, 1, 2, 3, 4, 5, 6, 7]))

        second_result = {0: [0, 3, 6], 1: [1, 4, 7], 2: [2, 5]}
        self.assertEqual(second_result, group_by(lambda x: x % 3, [0, 1, 2, 3, 4, 5, 6, 7]))

        third_result = {'even': [2, 8, 10, 12], 'odd': [1, 3, 5, 9]}
        self.assertEqual(third_result,
                         group_by(lambda x: 'odd' if x % 2 else 'even', [1, 2, 3, 5, 8, 9, 10, 12]))
Example #4
 def test_if_group_by_function_returns_correct_data2(self):
     self.assertEqual({
         'even': [2, 8, 10, 12],
         'odd': [1, 3, 5, 9]
     },
                      group_by(lambda x: 'odd' if x % 2 else 'even',
                               [1, 2, 3, 5, 8, 9, 10, 12]))
Example #5
 def test_another_group(self):
     self.assertNotEqual(
         group_by(is_even, [1, 2, 3, 4, 5, 6, 7, 8, 11, 13, 15]), {
             'even': [2, 4, 6, 8],
             'odd': [1, 3, 5, 7, 11, 13, 15],
             'new_thing': [123, 123]
         })
Example #6
def group_and_write_output(filename, name_index, value_index, output_file,
                           filter):
    folder = dirname(filename)

    outfile1 = os.path.join(folder, output_file)
    output_tuples1 = group_by(filename, name_index, value_index, filter)
    write_tuple_array(output_tuples1, outfile1, p=True)
Example #7
 def test_example2(self):
     self.assertEqual({
         'even': [2, 8, 10, 12],
         'odd': [1, 3, 5, 9]
     },
                      group_by(lambda x: 'odd' if x % 2 else 'even',
                               [1, 2, 3, 5, 8, 9, 10, 12]))
Example #8
 def test_strings(self):
     words = ["Apple", "animal", "apple", "ANIMAL", "animal"]
     word_groups = {
         "apple": ["Apple", "apple"],
         "animal": ["animal", "ANIMAL", "animal"],
     }
     output = group_by(words, key_func=str.lower)
     self.assertEqual(output, word_groups)
Example #9
 def test_no_key_function(self):
     words = ["apple", "animal", "apple", "animal", "animal"]
     word_groups = {
         "apple": ["apple", "apple"],
         "animal": ["animal", "animal", "animal"],
     }
     output = group_by(words)
     self.assertEqual(output, word_groups)
Example #10
def test_strings():
    words = ['Apple', 'animal', 'apple', 'ANIMAL', 'animal']
    expected = {
        'apple': ['Apple', 'apple'],
        'animal': ['animal', 'ANIMAL', 'animal'],
    }
    output = group_by(words, key_func=str.lower)
    assert output == expected
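Examples #8-#10 (and #22 further down) call group_by with the opposite argument order: the iterable comes first and key_func is an optional keyword argument, with the default grouping equal items together. A sketch under that assumption:

from collections import defaultdict


def group_by(iterable, key_func=None):
    # With no key_func, each item is its own grouping key, so equal
    # items end up in the same bucket (see test_no_key_function).
    if key_func is None:
        key_func = lambda item: item
    groups = defaultdict(list)
    for item in iterable:
        groups[key_func(item)].append(item)
    return dict(groups)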
Example #11
def output_by_time(folder, outputfilename='latency_by_time.txt'):
    grouping_index = 6  # start time
    value_index = 4  # value time

    filename = join(folder, 'all_tuples.txt')

    output_tuples = group_by(filename, grouping_index, value_index, filter=read_filter, grouping_function=time_bin)

    output_file = join(folder, outputfilename)
    write_tuple_array(output_tuples, output_file, p=True)
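The example above, like Examples #2, #12, #13, #29 and #37, calls a very different group_by from the latency-analysis scripts: it takes a tuples file and column indices instead of a key function. Its implementation is not included in this collection, so the sketch below is only inferred from the call sites: rows are filtered, grouped by the column at grouping_index (optionally transformed by a grouping_function such as time_bin), and one stats tuple is emitted per group, with the median at index 4 and the mean at index 5 as the callers assume. The parsing, the meaning of numeric, and the rest of the tuple layout are guesses, not the original code:

import statistics


def group_by(filename, grouping_index, value_index, filter=None,
             grouping_function=None, numeric=True):
    # Assumed output layout per group: (key, count, min, max, median, mean).
    # Callers read t[4] as the median and t[5] as the mean; the other
    # positions are assumptions.
    groups = {}
    with open(filename) as f:
        for line in f:
            tokens = line.split()
            if not tokens:
                continue
            # `filter` (read_filter, write_filter, ...) is assumed to take
            # the token list of one row and return True to keep the row.
            if filter is not None and not filter(tokens):
                continue
            key = tokens[grouping_index]
            if numeric:
                key = float(key)  # assumption: numeric ids / timestamps as keys
            if grouping_function is not None:
                key = grouping_function(key)
            groups.setdefault(key, []).append(float(tokens[value_index]))
    output_tuples = []
    for key, values in sorted(groups.items()):
        output_tuples.append((key, len(values), min(values), max(values),
                              statistics.median(values), statistics.mean(values)))
    return output_tuples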
Example #12
def count_successful_requests_by_nodes(tuples_file):
    from output_by import read_filter, write_filter
    group_by_index = 1  # lns
    value_index = 4  # latency
    filter = read_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)

    from write_array_to_file import write_tuple_array
    latency_lns_file = os.path.join(
        os.path.split(tuples_file)[0], 'latency_lns.txt')
    write_tuple_array(output_tuples, latency_lns_file, True)

    group_by_index = 2  # ns
    value_index = 4  # latency
    filter = read_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)

    from write_array_to_file import write_tuple_array
    latency_ns_file = os.path.join(
        os.path.split(tuples_file)[0], 'latency_ns.txt')
    write_tuple_array(output_tuples, latency_ns_file, True)

    group_by_index = 1  # lns
    value_index = 4  # latency
    filter = write_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)

    from write_array_to_file import write_tuple_array
    latency_lns_file = os.path.join(
        os.path.split(tuples_file)[0], 'write_latency_lns.txt')
    write_tuple_array(output_tuples, latency_lns_file, True)

    group_by_index = 2  # ns
    value_index = 4  # latency
    filter = write_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)

    from write_array_to_file import write_tuple_array
    latency_ns_file = os.path.join(
        os.path.split(tuples_file)[0], 'write_latency_ns.txt')
    write_tuple_array(output_tuples, latency_ns_file, True)
Example #13
def count_high_latency_nodes(tuples_file):
    group_by_index = 1  # lns
    value_index = 4  # latency
    filter = high_read_latency_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)

    from write_array_to_file import write_tuple_array
    high_latency_lns_file = os.path.join(
        os.path.split(tuples_file)[0], 'high_latency_lns.txt')
    write_tuple_array(output_tuples, high_latency_lns_file, True)

    group_by_index = 2  # ns
    value_index = 4  # latency
    filter = high_read_latency_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)

    from write_array_to_file import write_tuple_array
    high_latency_ns_file = os.path.join(
        os.path.split(tuples_file)[0], 'high_latency_ns.txt')
    write_tuple_array(output_tuples, high_latency_ns_file, True)

    group_by_index = 1  # lns
    value_index = 4  # latency
    filter = high_write_latency_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)

    from write_array_to_file import write_tuple_array
    high_latency_lns_file = os.path.join(
        os.path.split(tuples_file)[0], 'high_write_latency_lns.txt')
    write_tuple_array(output_tuples, high_latency_lns_file, True)

    group_by_index = 2  # ns
    value_index = 4  # latency
    filter = high_write_latency_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)

    from write_array_to_file import write_tuple_array
    high_latency_ns_file = os.path.join(
        os.path.split(tuples_file)[0], 'high_write_latency_ns.txt')
    write_tuple_array(output_tuples, high_latency_ns_file, True)
Example #14
def count_successful_requests_by_nodes(tuples_file):
    from output_by import read_filter, write_filter
    group_by_index = 1 # lns
    value_index = 4 # latency
    filter = read_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)
    
    from write_array_to_file import write_tuple_array
    latency_lns_file = os.path.join(os.path.split(tuples_file)[0], 'latency_lns.txt')
    write_tuple_array(output_tuples, latency_lns_file, True)
    
    group_by_index = 2 # ns
    value_index = 4 # latency
    filter = read_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)

    from write_array_to_file import write_tuple_array
    latency_ns_file = os.path.join(os.path.split(tuples_file)[0], 'latency_ns.txt')
    write_tuple_array(output_tuples, latency_ns_file, True)
    
    
    group_by_index = 1 # lns
    value_index = 4 # latency
    filter = write_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)
    
    from write_array_to_file import write_tuple_array
    latency_lns_file = os.path.join(os.path.split(tuples_file)[0], 'write_latency_lns.txt')
    write_tuple_array(output_tuples, latency_lns_file, True)
    
    group_by_index = 2 # ns
    value_index = 4 # latency
    filter = write_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)

    from write_array_to_file import write_tuple_array
    latency_ns_file = os.path.join(os.path.split(tuples_file)[0], 'write_latency_ns.txt')
    write_tuple_array(output_tuples, latency_ns_file, True)
Example #15
def count_high_latency_nodes(tuples_file):
    group_by_index = 1 # lns
    value_index = 4 # latency
    filter = high_read_latency_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)
    
    from write_array_to_file import write_tuple_array
    high_latency_lns_file = os.path.join(os.path.split(tuples_file)[0], 'high_latency_lns.txt')
    write_tuple_array(output_tuples, high_latency_lns_file, True)
    
    group_by_index = 2 # ns
    value_index = 4 # latency
    filter = high_read_latency_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)

    from write_array_to_file import write_tuple_array
    high_latency_ns_file = os.path.join(os.path.split(tuples_file)[0], 'high_latency_ns.txt')
    write_tuple_array(output_tuples, high_latency_ns_file, True)
    
    
    group_by_index = 1 # lns
    value_index = 4 # latency
    filter = high_write_latency_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)
    
    from write_array_to_file import write_tuple_array
    high_latency_lns_file = os.path.join(os.path.split(tuples_file)[0], 'high_write_latency_lns.txt')
    write_tuple_array(output_tuples, high_latency_lns_file, True)
    
    group_by_index = 2 # ns
    value_index = 4 # latency
    filter = high_write_latency_filter
    output_tuples = group_by(tuples_file, group_by_index, value_index, filter)

    from write_array_to_file import write_tuple_array
    high_latency_ns_file = os.path.join(os.path.split(tuples_file)[0], 'high_write_latency_ns.txt')
    write_tuple_array(output_tuples, high_latency_ns_file, True)
Example #16
 def test_test_tuples_of_strings(self):
     animals = [
         ('agatha', 'dog'),
         ('kurt', 'cat'),
         ('margaret', 'mouse'),
         ('cory', 'cat'),
         ('mary', 'mouse'),
     ]
     animals_by_type = {
         'mouse': [('margaret', 'mouse'), ('mary', 'mouse')],
         'dog': [('agatha', 'dog')],
         'cat': [('kurt', 'cat'), ('cory', 'cat')],
     }
     output = group_by(animals, key_func=itemgetter(1))
     self.assertEqual(output, animals_by_type)
Example #17
def test_tuples_of_strings():
    animals = [
        ('agatha', 'dog'),
        ('kurt', 'cat'),
        ('margaret', 'mouse'),
        ('cory', 'cat'),
        ('mary', 'mouse'),
    ]
    expected = {
        'mouse': [('margaret', 'mouse'), ('mary', 'mouse')],
        'dog': [('agatha', 'dog')],
        'cat': [('kurt', 'cat'), ('cory', 'cat')],
    }
    output = group_by(animals, key_func=itemgetter(1))
    assert output == expected
Example #18
 def test_example_zero(self):
     self.assertEqual({}, group_by(lambda x: x % 2, []))
Example #19
 def test_example3(self):
     self.assertEqual({
         0: [0, 3, 6],
         1: [1, 4, 7],
         2: [2, 5]
     }, group_by(lambda x: x % 3, [0, 1, 2, 3, 4, 5, 6, 7]))
Example #20
 def test_group_by_v3(self):
     arr = [0, 1, 2, 3, 4, 5, 6, 7]
     output = {0: [0, 3, 6], 1: [1, 4, 7], 2: [2, 5]}
     self.assertEqual(output, group_by(lambda x: x % 3, arr))
Example #21
 def test_if_group_by_function_returns_correct_data3(self):
     self.assertEqual({
         0: [0, 3, 6],
         1: [1, 4, 7],
         2: [2, 5]
     }, group_by(lambda x: x % 3, [0, 1, 2, 3, 4, 5, 6, 7]))
Example #22
def test_default_bonus():
    nums = [1, 2, 1, 3, 2, 1]
    expected = {1: [1, 1, 1], 2: [2, 2], 3: [3]}
    output = group_by(nums)
    assert output == expected
Example #23
 def test_example1(self):
     self.assertEqual({0: [0, 2, 4, 6], 1: [1, 3, 5, 7]}, group_by(
         lambda x: x % 2, [0, 1, 2, 3, 4, 5, 6, 7]))
Example #24
 def test_example3(self):
     self.assertEqual({0: [0, 3, 6], 1: [1, 4, 7], 2: [2, 5]}, group_by(
         lambda x: x % 3, [0, 1, 2, 3, 4, 5, 6, 7]))
Example #25
 def test_odds(self):
     self.assertEqual(group_by(is_even, [1, 2, 3, 4, 5, 6, 7, 8]), {
         'even': [2, 4, 6, 8],
         'odd': [1, 3, 5, 7]
     })
Example #26
 def test_if_group_by_function_returns_correct_data2(self):
     self.assertEqual({'even': [2, 8, 10, 12], 'odd': [1, 3, 5, 9]}, group_by(lambda x: 'odd' if x % 2 else 'even', [1, 2, 3, 5, 8, 9, 10, 12]))
Example #27
 def test_if_group_by_function_returns_correct_data1(self):
     self.assertEqual({0: [0, 2, 4, 6], 1: [1, 3, 5, 7]}, group_by(lambda x: x % 2, [0, 1, 2, 3, 4, 5, 6, 7]))
Example #28
 def test_if_group_by_function_returns_correct_data3(self):
     self.assertEqual({0: [0, 3, 6], 1: [1, 4, 7], 2: [2, 5]}, group_by(lambda x: x % 3, [0, 1, 2, 3, 4, 5, 6, 7]))
Example #29
def parse_dns_output(log_files_dir, output_dir, filter=None):
    
    output_extended_tuple_file(log_files_dir, output_dir)
    
    # plot cdf across requests
    tuples_file = os.path.join(output_dir, 'all_tuples.txt')
    
    filenames = [tuples_file]*2
    schemes = ['Ultra-DNS', 'LNS-RTT']
    #latency_index = 5
    #ping_latency_index = 6
    
    # latency index = 4, ping to lns = 5 for this experiment.
    col_nos = [6, 7]
    pdf_file = os.path.join(output_dir, 'cdf_latency.pdf')
    template_file = '/home/abhigyan/gnrs/gpt_files/template1.gpt'
    
    get_cdf_and_plot(filenames, schemes, col_nos, pdf_file, output_dir, template_file)
    
    # plot cdf across names
    value_index = 6
    name_index = 1  # 0 = lns-query-id, 1 = name-id, 2 = name, 3 = ultra-dns-latency,
    outfile1 = os.path.join(output_dir, 'reads_by_name.txt')
    output_tuples1 = group_by(tuples_file, name_index, value_index, filter = None)
    write_tuple_array(output_tuples1, outfile1, p = True)

    value_index = 7
    name_index = 1  # 1 = name,
    outfile2 = os.path.join(output_dir, 'pings_by_name.txt')
    output_tuples2 = group_by(tuples_file, name_index, value_index, filter = None)
    write_tuple_array(output_tuples2, outfile2, p = True)
    
    filenames = [outfile1,outfile2]
    schemes = ['Ultra-DNS', 'LNS-RTT']
    col_nos = [5, 5] # Mean value index = 5
    pdf_file = os.path.join(output_dir, 'read_mean_by_name.pdf')
    template_file = '/home/abhigyan/gnrs/gpt_files/template1.gpt'
    get_cdf_and_plot(filenames, schemes, col_nos, pdf_file, output_dir, template_file)
    
    filenames = [outfile1,outfile2]
    schemes = ['Ultra-DNS', 'LNS-RTT']
    col_nos = [4, 4] # Median value index = 4
    pdf_file = os.path.join(output_dir, 'read_median_by_name.pdf')
    template_file = '/home/abhigyan/gnrs/gpt_files/template1.gpt'
    get_cdf_and_plot(filenames, schemes, col_nos, pdf_file, output_dir, template_file)

    latency_stats = []
    from stats import get_stat_in_tuples
    latency_stats.extend(get_stat_in_tuples(all_latencies, 'read'))
    latency_stats.extend(get_stat_in_tuples(all_latencies, 'read'))
    
    read_median_list = [ t[4] for t in output_tuples1]
    read_mean_list = [ t[5] for t in output_tuples1]
    latency_stats.extend(get_stat_in_tuples(read_median_list, 'read_median_names'))
    latency_stats.extend(get_stat_in_tuples(read_mean_list, 'read_mean_names'))
    
    outputfile = os.path.join(output_dir, 'latency_stats.txt')
    write_tuple_array(latency_stats, outputfile, p = True)
    os.system('cat ' + outputfile)
    
    ## output them hostwise
    value_index = 6
    name_index = 5 # 0 = lns-query-id, 1 = name-id, 2 = name, 3 = ultra-dns-latency,  4 = hostname
    outfile1 = os.path.join(output_dir, 'reads_by_host.txt')
    output_tuples1 = group_by(tuples_file, name_index, value_index, filter = None, numeric = False)
    write_tuple_array(output_tuples1, outfile1, p = True)
Example #30
 def test_example1(self):
     self.assertEqual({
         0: [0, 2, 4, 6],
         1: [1, 3, 5, 7]
     }, group_by(lambda x: x % 2, [0, 1, 2, 3, 4, 5, 6, 7]))
Example #31
 def test_more_odds(self):
     self.assertEqual(
         group_by(is_even, [1, 2, 3, 4, 5, 6, 7, 8, 11, 13, 15]), {
             'even': [2, 4, 6, 8],
             'odd': [1, 3, 5, 7, 11, 13, 15]
         })
Example #32
 def test_example2(self):
     self.assertEqual({'even': [2, 8, 10, 12], 'odd': [1, 3, 5, 9]}, group_by(
         lambda x: 'odd' if x % 2 else 'even', [1, 2, 3, 5, 8, 9, 10, 12]))
Example #33
def group_and_write_output(filename, name_index, value_index, output_file, filter):
    folder = dirname(filename)
    
    outfile1 = os.path.join(folder, output_file)
    output_tuples1 = group_by(filename, name_index, value_index, filter)
    write_tuple_array(output_tuples1, outfile1, p = True)
Example #34
 def test_example_zero(self):
     self.assertEqual({}, group_by(lambda x: x % 2, []))
Example #35
 def test_group_by(self):
     arr = [0, 1, 2, 3, 4, 5, 6, 7]
     output = {0: [0, 2, 4, 6], 1: [1, 3, 5, 7]}
     self.assertEqual(output, group_by(lambda x: x % 2, arr))
Example #36
 def test_group_by_v2(self):
     arr = [1, 2, 3, 5, 8, 9, 10, 12]
     output = {'even': [2, 8, 10, 12], 'odd': [1, 3, 5, 9]}
     self.assertEqual(output, group_by(lambda x: 'odd' if x % 2 else 'even', arr))
Example #37
def output_stats_by_name(all_tuples_filename):

    value_index = 4
    name_index = 0  # 0 = name, 1 = lns, 2 = ns

    # this option removes names for which there is a failed read request

    folder = dirname(all_tuples_filename)

    exclude_failed_reads = True
    if exclude_failed_reads:
        failed_reads_names = select_failed_reads_names(all_tuples_filename)
        write_array(failed_reads_names.keys(),
                    os.path.join(folder, 'failed_reads_names.txt'))
        all_tuples_filename = write_all_tuples_excluding_failed(
            all_tuples_filename, failed_reads_names)

    outfile1 = os.path.join(folder, 'all_by_name.txt')
    output_tuples1 = group_by(all_tuples_filename, name_index, value_index)
    write_tuple_array(output_tuples1, outfile1, p=True)

    outfile2 = os.path.join(folder, 'writes_by_name.txt')
    output_tuples2 = group_by(all_tuples_filename,
                              name_index,
                              value_index,
                              filter=write_filter)
    write_tuple_array(output_tuples2, outfile2, p=True)

    outfile3 = os.path.join(folder, 'reads_by_name.txt')
    output_tuples3 = group_by(all_tuples_filename,
                              name_index,
                              value_index,
                              filter=read_filter)
    write_tuple_array(output_tuples3, outfile3, p=True)

    filenames = [outfile1, outfile2, outfile3]
    schemes = ['ALL', 'WRITES', 'READS']
    template_file = os.path.join(script_folder, 'template1.gpt')

    col_no = 4
    pdf_filename = os.path.join(folder, 'median_by_name.pdf')
    get_cdf_and_plot(filenames, schemes, [col_no] * len(schemes), pdf_filename,
                     folder, template_file)

    col_no = 5
    pdf_filename = os.path.join(folder, 'mean_by_name.pdf')
    get_cdf_and_plot(filenames, schemes, [col_no] * len(schemes), pdf_filename,
                     folder, template_file)

    # output key stats
    read_median_list = [t[4] for t in output_tuples3]
    read_mean_list = [t[5] for t in output_tuples3]
    write_median_list = [t[4] for t in output_tuples2]
    write_mean_list = [t[5] for t in output_tuples2]

    # delete this.
    #read_median_list2 = []
    #for v in read_median_list:
    #    if v <5000:
    #        read_median_list2.append(v)

    kv_tuples = []
    kv_tuples.extend(get_stat_in_tuples(read_median_list, 'read_median_names'))
    kv_tuples.extend(get_stat_in_tuples(read_mean_list, 'read_mean_names'))
    kv_tuples.extend(
        get_stat_in_tuples(write_median_list, 'write_median_names'))
    kv_tuples.extend(get_stat_in_tuples(write_mean_list, 'write_mean_names'))

    outputfile = os.path.join(folder, 'latency_stats_names.txt')
    write_tuple_array(kv_tuples, outputfile, p=True)
    os.system('cat ' + outputfile)
Example #38
def test_list_of_ints():
    my_nums = [1, 4, 5, 6, 8, 19, 34, 55]
    expected = {'Odd': [1, 5, 19, 55], 'Even': [4, 6, 8, 34]}
    output = group_by(my_nums, key_func=check_even)
    assert output == expected
Example #39
 def test_if_group_by_function_returns_correct_data1(self):
     self.assertEqual({
         0: [0, 2, 4, 6],
         1: [1, 3, 5, 7]
     }, group_by(lambda x: x % 2, [0, 1, 2, 3, 4, 5, 6, 7]))