Example #1
import argparse
import sys

# The `features` list and the benchmark_* generators are defined elsewhere in
# this module.


def main():
    parser = argparse.ArgumentParser(description='A program that generates a'
                            ' P4 program for benchmarking a particular feature')
    parser.add_argument('--feature', choices=features,
                help='select a feature for benchmarking')
    parser.add_argument('--checksum', default=False, action='store_true',
                            help='perform checksum update')
    # Processing options
    parser.add_argument('--tables', default=1, type=int, help='number of tables')
    parser.add_argument('--table-size', default=1, type=int,
                            help='number of rules in the table')
    # Parser (Field|Header) and Packet Modification options
    parser.add_argument('--headers', default=1, type=int, help='number of headers')
    parser.add_argument('--fields', default=1, type=int, help='number of fields')
    # Parser Complexity
    parser.add_argument('--depth', default=1, type=int,
                            help='the depth of the parse graph')
    parser.add_argument('--fanout', default=2, type=int,
                            help='the number of branches of a node in the parse graph')
    # State Access option
    parser.add_argument('--registers', default=1, type=int, help='number of registers')
    parser.add_argument('--nb-element', default=1024, type=int,
                            help='number of elements in a register')
    parser.add_argument('--element-width', default=32, type=int,
                            help='the bit width of a register element')
    # Parser Action complexity
    parser.add_argument('--operations', default=1, type=int,
                            help='the number of set-field/read/write operations')

    args = parser.parse_args()

    if args.feature == 'parse-header':
        benchmark_parser_header(args.headers, args.fields, do_checksum=args.checksum)
    elif args.feature == 'parse-field':
        benchmark_parser_with_header_field(args.fields, do_checksum=args.checksum)
    elif args.feature == 'parse-complex':
        parser_complexity(args.depth, args.fanout)
    elif args.feature == 'set-field':
        benchmark_field_write(args.operations, do_checksum=args.checksum)
    elif args.feature == 'add-header':
        benchmark_modification(args.headers, args.fields, 'add')
    elif args.feature == 'rm-header':
        benchmark_modification(args.headers, args.fields, 'rm')
    elif args.feature == 'pipeline':
        benchmark_pipeline(args.tables, args.table_size)
    elif args.feature == 'read-state':
        benchmark_memory(args.registers, args.element_width, args.nb_element,
                            args.operations, False)
    elif args.feature == 'write-state':
        benchmark_memory(args.registers, args.element_width, args.nb_element,
                            args.operations, True)
    else:
        parser.print_help()
        sys.exit(0)

    print("Generated files in the 'output' directory")
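
A minimal sketch of driving the generator above from another Python script, assuming the module is saved as benchmark.py (the script name is an assumption; the feature name and flags come from the argparse setup above):

import subprocess

# Generate a parser benchmark with 4 headers of 2 fields each and checksum
# updates enabled; the generated P4 program lands in the 'output' directory.
subprocess.check_call([
    'python', 'benchmark.py',
    '--feature', 'parse-header',
    '--headers', '4',
    '--fields', '2',
    '--checksum',
])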
Example #2
def main():
    parser = argparse.ArgumentParser(
        description='A program that generates a'
        ' P4 program for benchmarking a particular feature')
    parser.add_argument('--feature',
                        choices=features,
                        help='select a feature for benchmarking')
    parser.add_argument('--checksum',
                        default=False,
                        action='store_true',
                        help='perform checksum update')
    # Parser (Field|Header) and Packet Modification options
    parser.add_argument('--headers',
                        default=1,
                        type=int,
                        help='number of headers')
    parser.add_argument('--fields',
                        default=1,
                        type=int,
                        help='number of fields')
    # Parser Complexity
    parser.add_argument('--depth',
                        default=1,
                        type=int,
                        help='the depth of the parse graph')
    parser.add_argument(
        '--fanout',
        default=2,
        type=int,
        help='the number of branches of a node in the parse graph')

    args = parser.parse_args()

    if args.feature == 'parse-header':
        benchmark_parser_header(args.headers,
                                args.fields,
                                do_checksum=args.checksum)
    elif args.feature == 'parse-field':
        benchmark_parser_with_header_field(args.fields,
                                           do_checksum=args.checksum)
    elif args.feature == 'parse-complex':
        parser_complexity(args.depth, args.fanout)
    elif args.feature == 'parse-header16':
        benchmark_parser_header(args.headers,
                                args.fields,
                                do_checksum=args.checksum)
    elif args.feature == 'parse-field16':
        # Placeholder: the 16-bit field-parsing benchmark is not wired up yet
        print("parse-field16 is not implemented")
        # benchmark_parser_with_header_field(args.fields, do_checksum=args.checksum)
    elif args.feature == 'parse-complex16':
        # Placeholder: the 16-bit parse-graph benchmark is not wired up yet
        print("parse-complex16 is not implemented")
Example #3
def test_benchmark_parser_header_generator(self):
    ret = benchmark_parser_header(10, 4)
    self.assertTrue(ret)
    prog = 'main'
    ret = call([
        self.p4c,
        'output/%s.p4' % prog, '--json',
        'output/%s.json' % prog
    ])
    self.assertEqual(ret, 0)
Example #4
def compile_p4_program(self):
    ret = benchmark_parser_header(self.nb_header, self.nb_field)
    assert ret
    prog = 'main'
    json_path = 'output/%s.json' % prog
    out_file = '{0}/p4c.log'.format(self.directory)
    with open(out_file, 'w+') as out:
        p = Popen([self.p4c,
                   'output/%s.p4' % prog, '--json', json_path],
                  stdout=out,
                  stderr=out)
        p.wait()
        assert p.returncode == 0
Example #5
def configure(self):
    ret = benchmark_parser_header(self.nb_header, self.nb_field)
    assert ret
    prog = 'main'
    cmd = """{0}/configure --with-dpdk={1}
            CFLAGS="-g -O2 -Wno-cast-align"
            p4inputfile={2}
            p4outputdir={3}/include/p4/src""".format(
        self.ovs, self.dpdk, 'output/main.p4', dir_path)
    print(cmd)
    out_file = '{0}/pisces_compiler.log'.format(self.directory)
    with open(out_file, 'w+') as out:
        p = Popen(shlex.split(cmd), stdout=out, stderr=out)
        p.wait()
        assert p.returncode == 0
Example #6
def test_benchmark_parser_header(self):
    ret = benchmark_parser_header(10, 1)
    self.assertTrue(ret)
        assert ret
        build_p4_prog()
    elif conf['type'] == 'mem':
        assert 'registers' in conf and 'size' in conf \
            and 'elements' in conf and 'operations' in conf
        write_op = True
        if 'write' in conf:
            write_op = conf['write'].lower() in ['1', 'true', 't', 'yes', 'y']
        ret = benchmark_memory(int(conf['registers']), int(conf['size']),
                               int(conf['elements']), int(conf['operations']),
                               write_op)
        assert ret
        build_p4_prog()
    elif conf['type'] == 'pipeline':
        assert 'tables' in conf and 'tbl_size' in conf
        ret = benchmark_pipeline(int(conf['tables']), int(conf['tbl_size']))
        assert ret
        build_p4_prog()
    elif conf['type'] == 'parser':
        assert 'headers' in conf and 'fields' in conf
        ret = benchmark_parser_header(int(conf['headers']), int(conf['fields']))
        assert ret
        build_p4_prog()
    else:
        assert False, "unknown experiment type: " + conf['type']

    count = int(conf['count']) if 'count' in conf else 100000

    # Run the experiment with the switch and load generator
    sent, recv, lost, tput, duration, results = run_with_load(count=count)

    # Save the results
    dump_tsv(clean_results(results), 'results.tsv')
    dump_tsv([[sent, recv, lost, tput, duration]], 'load_stats.tsv')
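
The runner above pulls everything it needs from a flat conf mapping. A sketch of what that mapping might look like for the 'mem' experiment, using only the keys the branch checks for (the code that builds conf is not shown here, so the exact shape and value types are assumptions):

# Hypothetical conf for the 'mem' branch; values are strings because the
# snippet converts them with int() / .lower() itself.
conf = {
    'type': 'mem',         # selects benchmark_memory()
    'registers': '2',      # number of registers
    'size': '32',          # element width in bits
    'elements': '1024',    # elements per register
    'operations': '4',     # number of read/write operations
    'write': 'true',       # any of '1'/'true'/'t'/'yes'/'y' enables writes
    'count': '100000',     # packets to send in the load test
}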