示例#1
0
def multi_processing(process_number,
                     target_object,
                     exec_name,
                     exec_list,
                     exec_path,
                     testcase_path,
                     test_path,
                     reload_test,
                     shuffle_test,
                     payloads=None,
                     file_name=None):
    """Run fuzzing tests distributed over `process_number` worker processes.

    Each worker is paired with its own qemu instance and its own task queue;
    this function acts as the dispatcher, handing out chunks of tests from
    the XML test tree until it is exhausted, then sending each worker a
    None sentinel.

    Args:
        process_number: number of worker processes (and qemu instances).
        target_object: qemu configuration file name under "configurations/".
        exec_name: execution name forwarded to xml_tree.calc_tests().
        exec_list: unused in this function (kept for interface compatibility).
        exec_path: path to execution.xml; "" selects the default under
            "test_generation/".
        testcase_path: path to testcase.xml; "" selects the default.
        test_path: path to test.xml; "" selects the default.
        reload_test: forwarded to each worker process.
        shuffle_test: unused in this function (kept for interface
            compatibility).
        payloads: optional pre-built test tree; when given, XML parsing is
            skipped entirely and `payloads` is used directly.
        file_name: optional output-file postfix, honored only when running
            with exactly one process.
    """
    # Kept global so signal_handler (defined elsewhere in this file) can
    # reach the children on SIGINT -- presumably to terminate them; confirm
    # in signal_handler.
    global process_list
    global printPerf_process
    signal.signal(signal.SIGINT, signal_handler)

    # Resolve the three XML paths, falling back to the defaults under
    # test_generation/ whenever the caller passed "".
    path_prefix = "test_generation/"
    exec_path_value = path_prefix + "execution.xml"
    if exec_path != "":
        exec_path_value = exec_path

    testcase_path_value = path_prefix + "testcase.xml"
    if testcase_path != "":
        testcase_path_value = testcase_path

    test_path_value = path_prefix + "test.xml"
    if test_path != "":
        test_path_value = test_path

    # Either parse the XML test definitions or use the caller-supplied tree.
    if payloads is None:
        xml_tree = xml_parser(test_path_value, testcase_path_value,
                              exec_path_value)
        xml_tree.calc_tests(exec_name)

        print "[*] Number of tests: " + str(xml_tree.get_number_of_elements())
        xml_tree.print_tree()
    else:
        xml_tree = payloads
        print "[*] Number of tests: " + str(xml_tree.get_number_of_elements())

    max_tasks = xml_tree.get_number_of_elements()
    # Shared task counter, incremented by the workers (see process()).
    sm_num_of_tasks = Value('i', 0)

    # info_queue: workers post their own index here when they want work.
    # queue_list[i]: per-worker queue on which data chunks are delivered.
    info_queue = Queue()
    queue_list = []
    process_list = []
    qemu_list = []

    # Drain the semaphore to 0 so the workers block until they are released
    # one by one below (staggered startup).
    process_lock = Semaphore(process_number)
    for i in range(process_number):
        process_lock.acquire()
    # Presumably limits how many workers may repair/restart concurrently;
    # consumed inside process() -- confirm there.
    sem = Semaphore(config.PROCESS_REPAIR_SEMAPHORE)

    for i in range(process_number):
        queue_list.append(Queue())
        # One qemu instance per worker, each with its own control socket.
        qemu_object = qemu("configurations/" + target_object,
                           "/tmp/vusbf_" + str(i) + "_socket", i)
        qemu_list.append(qemu_object)
        # file_name is only honored in single-process mode.
        if process_number == 1 and file_name is not None:
            process_list.append(
                Process(target=process,
                        args=("t" + str(i), qemu_object, sm_num_of_tasks, i,
                              info_queue, queue_list[i], reload_test, sem,
                              process_lock),
                        kwargs={"file_postfix_name": file_name}))
        else:
            process_list.append(
                Process(target=process,
                        args=("t" + str(i), qemu_object, sm_num_of_tasks, i,
                              info_queue, queue_list[i], reload_test, sem,
                              process_lock)))

    # Progress printer; created now but only started once all workers have
    # been handed their first chunk (see the dispatcher loop below).
    printPerf_process = Process(target=printPerf,
                                args=(max_tasks, sm_num_of_tasks))

    j = 0
    print "[*] Starting processes..."
    for e in process_list:
        e.start()
        time.sleep(0.1)
    print "[*] Preparing processes..."
    time.sleep(config.PROCESS_STARTUP_TIME)

    # Dispatcher loop: j counts chunks handed out; num_of_fin counts workers
    # that have already received the None sentinel.
    num_of_fin = 0
    num_of_processes = len(process_list)
    j = 0
    while True:
        if num_of_fin == num_of_processes:
            break
        # Once the number of dispatched chunks equals the number of still
        # active workers, start the perf printer and release the workers at
        # a staggered rate. j only grows and the right-hand side only
        # shrinks, so this fires at most once.
        if j == num_of_processes - num_of_fin:
            print "[*] Done..."
            printPerf_process.start()
            for i in range(num_of_processes):
                time.sleep(config.PROCESS_STARTUP_RATE)
                process_lock.release()

        # Block until some worker asks for work.
        process_num = info_queue.get()

        # Hand out the next chunk, or None once the tree is exhausted.
        data = xml_tree.get_data_chunk(config.NUMBER_OF_JOBS_PER_PROCESS)
        if data is not None:
            queue_list[process_num].put(data)
            j += 1
        else:
            num_of_fin += 1
            queue_list[process_num].put(None)

    print "[*] Finished..."
示例#2
0
# Configure root logging once for the whole run.
# NOTE(review): datefmt '%Y-%d-%m' is year-DAY-month; '%Y-%m-%d' was probably
# intended -- confirm before changing, since it only affects log timestamps.
logging.basicConfig(format='%(asctime)s - %(module)s::%(funcName)s - %(message)s', datefmt='%Y-%d-%m %H:%M:%S', level=logging.DEBUG)

# Toggle between running tests natively and inside a QEMU guest.
is_native = False

# create a fresh results folder
result_dir = 'results'
if os.path.exists(result_dir):
	shutil.rmtree(result_dir)
os.makedirs(result_dir)

# Work on a throwaway copy of the disk image so the source image stays
# pristine across runs.
src_disk_image = 'images/disk.img'
tmp_image = '/tmp/atf-disk.img'
if is_native:
	backend = native.native('tmp')
else:
	backend = qemu.qemu(src_disk_image, tmp_image)

test_dir = 'tests'
logger = logging.getLogger('main')

# Build the test plan and run each test in sorted test-id order.
test_plan = lib.make_testplan(test_dir)
tests_to_run = sorted(test_plan.keys())

for tid in tests_to_run:
	t = test_plan[tid]
	logger.info('Preparing "%s"' % tid)
	backend.prepare(t)

	# Fix: use the named 'main' logger here as well -- the original called
	# logging.info(), bypassing the module logger and inconsistent with the
	# "Preparing" line above.
	logger.info('Executing "%s"' % tid)
	output = backend.launch()  # NOTE(review): output is discarded -- confirm intended
	backend.cleanup()
示例#3
0
def multi_processing(process_number, target_object, exec_name, exec_list, exec_path, testcase_path, test_path,
                     reload_test, shuffle_test, payloads=None, file_name=None):
    """Distribute fuzzing tests over `process_number` worker processes.

    Spawns one worker (each paired with its own qemu instance) per slot and
    then serves as the dispatcher: chunks from the XML test tree are pushed
    onto per-worker queues on demand until the tree is empty, after which
    each worker receives a None sentinel.

    Args:
        process_number: number of worker processes / qemu instances.
        target_object: qemu configuration file name under "configurations/".
        exec_name: execution name forwarded to xml_tree.calc_tests().
        exec_list: unused here (kept for interface compatibility).
        exec_path: path to execution.xml; "" picks the default under
            "test_generation/".
        testcase_path: path to testcase.xml; "" picks the default.
        test_path: path to test.xml; "" picks the default.
        reload_test: forwarded to every worker process.
        shuffle_test: unused here (kept for interface compatibility).
        payloads: optional pre-built test tree; when set, XML parsing is
            skipped and the tree is used as-is.
        file_name: optional output-file postfix, honored only in
            single-process mode.
    """
    # Globals so signal_handler (defined elsewhere in this file) can reach
    # the child processes on SIGINT.
    global process_list
    global printPerf_process
    signal.signal(signal.SIGINT, signal_handler)

    # Resolve XML paths, defaulting into test_generation/ for empty args.
    path_prefix = "test_generation/"
    exec_path_value = path_prefix + "execution.xml"
    if exec_path != "":
        exec_path_value = exec_path

    testcase_path_value = path_prefix + "testcase.xml"
    if testcase_path != "":
        testcase_path_value = testcase_path

    test_path_value = path_prefix + "test.xml"
    if test_path != "":
        test_path_value = test_path

    # Parse the XML test definitions unless a tree was supplied directly.
    if payloads is None:
        xml_tree = xml_parser(test_path_value, testcase_path_value, exec_path_value)
        xml_tree.calc_tests(exec_name)

        print "[*] Number of tests: " + str(xml_tree.get_number_of_elements())
        xml_tree.print_tree()
    else:
        xml_tree = payloads
        print "[*] Number of tests: " + str(xml_tree.get_number_of_elements())

    max_tasks = xml_tree.get_number_of_elements()
    # Shared counter updated by the workers (see process()).
    sm_num_of_tasks = Value('i', 0)

    # info_queue: workers post their index here to request work.
    # queue_list[i]: per-worker delivery queue for data chunks.
    info_queue = Queue()
    queue_list = []
    process_list = []
    qemu_list = []

    # Drain the semaphore to 0; workers stay blocked until released one by
    # one in the dispatcher loop (staggered startup).
    process_lock = Semaphore(process_number)
    for i in range(process_number):
        process_lock.acquire()
    # Presumably bounds concurrent worker repair/restart; used in process().
    sem = Semaphore(config.PROCESS_REPAIR_SEMAPHORE)

    for i in range(process_number):
        queue_list.append(Queue())
        # One qemu instance per worker, each on its own control socket.
        qemu_object = qemu("configurations/" + target_object, "/tmp/vusbf_" + str(i) + "_socket", i)
        qemu_list.append(qemu_object)
        # file_name is only honored when exactly one process is used.
        if process_number == 1 and file_name is not None:
            process_list.append(Process(target=process, args=(
                "t" + str(i), qemu_object, sm_num_of_tasks, i, info_queue, queue_list[i], reload_test, sem, process_lock), kwargs={"file_postfix_name": file_name}))
        else:
            process_list.append(Process(target=process, args=(
                "t" + str(i), qemu_object, sm_num_of_tasks, i, info_queue, queue_list[i], reload_test, sem, process_lock)))

    # Progress printer; started later, once every worker has its first chunk.
    printPerf_process = Process(target=printPerf, args=(max_tasks, sm_num_of_tasks))

    j = 0
    print "[*] Starting processes..."
    for e in process_list:
        e.start()
        time.sleep(0.1)
    print "[*] Preparing processes..."
    time.sleep(config.PROCESS_STARTUP_TIME)

    # Dispatcher loop: j = chunks handed out, num_of_fin = workers already
    # sent the None sentinel.
    num_of_fin = 0
    num_of_processes = len(process_list)
    j = 0
    while True:
        if num_of_fin == num_of_processes:
            break
        # When dispatched chunks equal still-active workers, start the perf
        # printer and release workers at a staggered rate; j only grows and
        # the right-hand side only shrinks, so this fires at most once.
        if j == num_of_processes-num_of_fin:
            print "[*] Done..."
            printPerf_process.start()
            for i in range(num_of_processes):
                time.sleep(config.PROCESS_STARTUP_RATE)
                process_lock.release()

        # Block until a worker asks for work.
        process_num = info_queue.get()

        # Next chunk, or None once the test tree is exhausted.
        data = xml_tree.get_data_chunk(config.NUMBER_OF_JOBS_PER_PROCESS)
        if data is not None:
            queue_list[process_num].put(data)
            j += 1
        else:
            num_of_fin += 1
            queue_list[process_num].put(None)

    print "[*] Finished..."
示例#4
0
def client(process_number, target_object, host, port, reload_test):
    """Run fuzzing workers fed with payloads fetched from a remote server.

    Like multi_processing(), but instead of generating tests locally, a
    network-requester child process pulls payload chunks from host:port
    (via request_queue/payload_queue) and this function relays them to the
    workers.

    Args:
        process_number: number of worker processes / qemu instances.
        target_object: qemu configuration file name under "configurations/".
        host: server hostname to request payloads from.
        port: server port.
        reload_test: forwarded to every worker process.
    """
    # Globals so signal_handler (defined elsewhere in this file) can reach
    # the child processes on SIGINT.
    global process_list
    global network_requester_process
    signal.signal(signal.SIGINT, signal_handler)
    number_of_threads = process_number

    # NOTE(review): max_tasks is never used below -- printPerf is started
    # with 0 instead; confirm whether that is intentional.
    max_tasks = 100000
    # Shared counter updated by the workers (see process()).
    sm_num_of_tasks = Value('i', 0)

    # info_queue: workers post their index here to request work.
    # queue_list[i]: per-worker delivery queue for payload chunks.
    info_queue = Queue()
    queue_list = []
    process_list = []
    # Drain the semaphore to 0; workers stay blocked until released one by
    # one in the dispatcher loop (staggered startup).
    process_lock = Semaphore(process_number)
    for i in range(process_number):
        process_lock.acquire()
    # Presumably bounds concurrent worker repair/restart; used in process().
    sem = Semaphore(config.PROCESS_REPAIR_SEMAPHORE)

    for i in range(number_of_threads):
        queue_list.append(Queue())
        # One qemu instance per worker, each on its own control socket.
        qemu_object = qemu("configurations/" + target_object,
                           "/tmp/vusbf_" + str(i) + "_socket", i)
        process_list.append(
            Process(target=process,
                    args=("t" + str(i), qemu_object, sm_num_of_tasks, i,
                          info_queue, queue_list[i], reload_test, sem,
                          process_lock)))

    # Progress printer (0 passed as max_tasks -- see NOTE above).
    printPerf_process = Process(target=printPerf, args=(0, sm_num_of_tasks))

    # payload_queue: chunks arriving from the server.
    # request_queue: chunk-size requests going to the server; prime it with
    # one request so a chunk is already in flight before the loop starts.
    payload_queue = Queue()
    request_queue = Queue()
    request_queue.put(config.CLUSTERING_CHUNK_SIZE)

    j = 0
    print "[*] Starting processes..."
    for e in process_list:
        e.start()
        time.sleep(0.1)
    print "[*] Preparing processes..."
    time.sleep(config.PROCESS_STARTUP_TIME)

    # start network task requester
    # NOTE(review): "sdsds"/"sasas" and 1337/2 look like placeholder
    # arguments -- check start_network_task_requester's signature.
    network_requester_process = Process(
        target=start_network_task_requester,
        args=(host, port, "sdsds", "sasas", sm_num_of_tasks, info_queue,
              payload_queue, request_queue, 1337, 2))
    network_requester_process.start()

    # Dispatcher loop: j = chunks handed out, num_of_fin = workers already
    # sent the None sentinel, no_data latches once the server runs dry.
    num_of_fin = 0
    num_of_processes = len(process_list)
    j = 0
    no_data = False
    while True:
        if num_of_fin == num_of_processes:
            break
        # When dispatched chunks equal still-active workers, start the perf
        # printer and release workers at a staggered rate; fires at most
        # once since j only grows and the right-hand side only shrinks.
        if j == num_of_processes - num_of_fin:
            print "[*] Done..."
            printPerf_process.start()
            for i in range(num_of_processes):
                time.sleep(config.PROCESS_STARTUP_RATE)
                process_lock.release()

        # Block until a worker asks for work.
        process_num = info_queue.get()

        # Request the next chunk from the server unless it has already
        # signalled exhaustion (payload_queue yielding None).
        if not no_data:
            request_queue.put(config.CLUSTERING_CHUNK_SIZE)
            data = payload_queue.get()
        else:
            data = None
        if data is not None:
            queue_list[process_num].put(data)
            j += 1
        else:
            num_of_fin += 1
            queue_list[process_num].put(None)
            no_data = True

    print "[*] Finished..."
    # Tear down the helper processes once all workers have been dismissed.
    printPerf_process.terminate()
    network_requester_process.terminate()
示例#5
0
def client(process_number, target_object, host, port, reload_test):
    """Run fuzzing workers driven by payloads from a remote server.

    Counterpart of multi_processing() for distributed operation: a
    network-requester child process fetches payload chunks from host:port
    (request_queue out, payload_queue in) and this dispatcher relays them
    to the per-worker queues until the server reports no more data.

    Args:
        process_number: number of worker processes / qemu instances.
        target_object: qemu configuration file name under "configurations/".
        host: server hostname supplying payloads.
        port: server port.
        reload_test: forwarded to every worker process.
    """
    # Globals so signal_handler (defined elsewhere in this file) can reach
    # the children on SIGINT.
    global process_list
    global network_requester_process
    signal.signal(signal.SIGINT, signal_handler)
    number_of_threads = process_number

    # NOTE(review): max_tasks is unused below (printPerf is given 0) --
    # confirm whether that is intentional.
    max_tasks = 100000
    # Shared counter updated by the workers (see process()).
    sm_num_of_tasks = Value('i', 0)

    # info_queue: workers post their index to request work.
    # queue_list[i]: per-worker delivery queue for payload chunks.
    info_queue = Queue()
    queue_list = []
    process_list = []
    # Drain the semaphore to 0 so workers block until the staggered release
    # in the dispatcher loop below.
    process_lock = Semaphore(process_number)
    for i in range(process_number):
        process_lock.acquire()
    # Presumably bounds concurrent worker repair/restart; used in process().
    sem = Semaphore(config.PROCESS_REPAIR_SEMAPHORE)

    for i in range(number_of_threads):
        queue_list.append(Queue())
        # One qemu instance per worker, each with its own control socket.
        qemu_object = qemu("configurations/" + target_object, "/tmp/vusbf_" + str(i) + "_socket", i)
        process_list.append(Process(target=process, args=("t" + str(i), qemu_object, sm_num_of_tasks, i, info_queue, queue_list[i], reload_test, sem, process_lock)))

    # Progress printer (started later; 0 passed as max_tasks -- see NOTE).
    printPerf_process = Process(target=printPerf, args=(0, sm_num_of_tasks))

    # payload_queue: chunks arriving from the server; request_queue: chunk
    # requests to the server, primed with one so a chunk is in flight early.
    payload_queue = Queue()
    request_queue = Queue()
    request_queue.put(config.CLUSTERING_CHUNK_SIZE)

    j = 0
    print "[*] Starting processes..."
    for e in process_list:
        e.start()
        time.sleep(0.1)
    print "[*] Preparing processes..."
    time.sleep(config.PROCESS_STARTUP_TIME)

    # start network task requester
    # NOTE(review): "sdsds"/"sasas" and 1337/2 look like placeholders --
    # check start_network_task_requester's signature.
    network_requester_process = Process(target=start_network_task_requester, args=(host, port, "sdsds", "sasas", sm_num_of_tasks, info_queue, payload_queue, request_queue, 1337, 2))
    network_requester_process.start()

    # Dispatcher loop: j counts dispatched chunks, num_of_fin counts workers
    # already sent the None sentinel, no_data latches on server exhaustion.
    num_of_fin = 0
    num_of_processes = len(process_list)
    j = 0
    no_data = False
    while True:
        if num_of_fin == num_of_processes:
            break
        # Once dispatched chunks equal still-active workers, start the perf
        # printer and release workers at a staggered rate; fires at most
        # once (j only grows, the right-hand side only shrinks).
        if j == num_of_processes-num_of_fin:
            print "[*] Done..."
            printPerf_process.start()
            for i in range(num_of_processes):
                time.sleep(config.PROCESS_STARTUP_RATE)
                process_lock.release()

        # Block until some worker asks for work.
        process_num = info_queue.get()

        # Fetch the next chunk unless the server already ran dry
        # (payload_queue yielding None sets the latch below).
        if not no_data:
            request_queue.put(config.CLUSTERING_CHUNK_SIZE)
            data = payload_queue.get()
        else:
            data = None
        if data is not None:
            queue_list[process_num].put(data)
            j += 1
        else:
            num_of_fin += 1
            queue_list[process_num].put(None)
            no_data = True

    print "[*] Finished..."
    # Tear down helper processes once every worker has been dismissed.
    printPerf_process.terminate()
    network_requester_process.terminate()