def main(database_file):
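    # Seed the benchmark database with one Test per container count,
    # all based on the packettracer_xvfb_mount image and the ptdata volume.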
    dao = PerformanceTestDAO(database_file)
    session = dao.get_session()
    for num_containers in (1, 5, 10, 20, 40, 60, 80, 100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600):
        test = Test(image_id='packettracer_xvfb_mount', volumes_from='ptdata', number_of_containers=num_containers, repetitions=1)
        session.add(test)
    session.commit()
def main(database_file, log_file):
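    # Aggregate the recorded measures per test and hand them to generate_data_json for the plots.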
    print "Generating plots..."
    dao = PerformanceTestDAO(database_file)
    session = dao.get_session()
    measures = create_dictionary()
    for test in session.query(Test).order_by(Test.number_of_containers):
        per_run = create_dictionary(contains_dicts=False)
        for run in test.runs:
            if run.ended:  # Ignore runs which have not ended
                per_container = create_dictionary(contains_dicts=False, fields=(CPU_TOTAL, CPU_PERC, MEMORY, MEMORY_MAX, MEMORY_PERC))
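                # Collect CPU and memory samples from every container that ran without errors.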
                for container in run.containers:
                    if not container.error:
                        per_container[CPU_TOTAL].append(container.cpu.total_cpu)
                        per_container[CPU_PERC].append(container.cpu.percentual_cpu)
                        per_container[MEMORY].append(container.memory.usage)
                        per_container[MEMORY_MAX].append(container.memory.maximum)
                        per_container[MEMORY_PERC].append(container.memory.percentual)
                per_run[SIZE].append(run.disk.size)
                per_run[RESPONSE_TIME].append(run.response_time.time)
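                # For each metric, store both the sum across containers and the
                # per-container mean (the *_PC series).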
                per_run[CPU_TOTAL].append(numpy.sum(per_container[CPU_TOTAL]))
                per_run[CPU_TOTAL_PC].append(numpy.mean(per_container[CPU_TOTAL]))
                per_run[CPU_PERC].append(numpy.sum(per_container[CPU_PERC]))
                per_run[CPU_PERC_PC].append(numpy.mean(per_container[CPU_PERC]))
                per_run[MEMORY].append(numpy.sum(per_container[MEMORY]))
                per_run[MEMORY_PC].append(numpy.mean(per_container[MEMORY]))
                per_run[MEMORY_MAX].append(numpy.sum(per_container[MEMORY_MAX]))
                per_run[MEMORY_MAX_PC].append(numpy.mean(per_container[MEMORY_MAX]))
                per_run[MEMORY_PERC].append(numpy.sum(per_container[MEMORY_PERC]))
                per_run[MEMORY_PERC_PC].append(numpy.mean(per_container[MEMORY_PERC]))
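        # Average each measure across this test's ended runs, keyed by its container count.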
        for key in measures:
            measures[key][test.number_of_containers] = numpy.mean(per_run[key])
    generate_data_json(measures)
def entry_point():
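    # Parse CLI arguments, load the configuration, set up logging, and launch the benchmarks.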
    parser = ArgumentParser(description='Run benchmark.')
    parser.add_argument('-config', dest='config', default='../config.ini',
                            help='If a valid configuration file is provided, the priority order is: ' +
                            'values passed as parameters, then values from the configuration file, ' +
                            'then the default values for parameters.')
    parser.add_argument('-docker', dest='url', help='Docker socket URL.')
    parser.add_argument('-db', dest='database', help='Database file.')
    parser.add_argument('-log', dest='log', help='Log file.')
    parser.add_argument('-testId', default=False, dest='testId', help='Benchmark identifier. ' +
                            'If it is not provided, all the pending benchmarks will be run.')
    args = parser.parse_args()

    configuration.set_file_path(args.config)

    FORMAT = '%(asctime)-15s %(message)s'
    logging.basicConfig(filename=configuration.get_log(args.log), level=logging.DEBUG, format=FORMAT)

    dao = PerformanceTestDAO(configuration.get_db(args.database))
    docker = DockerClientFactory(configuration.get_docker_url(args.url))

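    # Without -testId, execute every pending benchmark; otherwise run only the requested test.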
    if not args.testId:
        run_all(docker, dao)
    else:
        session = dao.get_session()
        test = session.query(Test).get(args.testId)
        run_test(docker, dao, test)