def run(con_dic):
    s = con_dic['test_config']['scale_up_values']

    scale_up_values = [
        (c, m * s['mem_unit'])
        for c in range(s['cpus_min'], s['cpus_max'], s['cpus_incr'])
        for m in range(s['mem_min'], s['mem_max'], s['mem_incr'])
    ]
    data = {"scale_up_values": scale_up_values}
    con_dic["result_file"] = os.path.dirname(
        os.path.abspath(__file__)) + "/test_case/result"
    pre_role_result = 1
    data_return = {}
    data_max = {}
    data_return["throughput"] = 1

    if con_dic["runner_config"]["yardstick_test_ip"] is None:
        con_dic["runner_config"]["yardstick_test_ip"] =\
            conf_parser.ip_parser("yardstick_test_ip")

    env_pre(con_dic)

    if con_dic["runner_config"]["dashboard"] == 'y':
        if con_dic["runner_config"]["dashboard_ip"] is None:
            con_dic["runner_config"]["dashboard_ip"] =\
                conf_parser.ip_parser("dashboard")
        LOG.info("Create Dashboard data")
        DashBoard.dashboard_system_bandwidth(con_dic["runner_config"])

    bandwidth_tmp = 1
    # step through each (vcpus, mem) combination in the scale-up grid
    for vcpus, mem in data["scale_up_values"]:
        data_max["throughput"] = 1
        test_config = {
            "vcpus": vcpus,
            "mem": mem,
            "test_time": con_dic['test_config']['test_time']
        }
        data_reply = do_test(test_config, con_dic)
        conf_parser.result_to_file(data_reply, con_dic["out_file"])
        # TODO: figure out which KPI to use
        bandwidth = data_reply["throughput"]
        if data_max["throughput"] < bandwidth:
            data_max = data_reply
        if abs(bandwidth_tmp - bandwidth) / float(bandwidth_tmp) < 0.025:
            LOG.info("this group of data has reached top output")
            break
        else:
            pre_reply = data_reply
            bandwidth_tmp = bandwidth
        cur_role_result = float(pre_reply["throughput"])
        if (abs(pre_role_result - cur_role_result) / float(pre_role_result) <
                0.025):
            LOG.info("The performance increases slowly")
        if data_return["throughput"] < data_max["throughput"]:
            data_return = data_max
        pre_role_result = cur_role_result
    LOG.info("Find bottlenecks of this config")
    LOG.info("The max data is %d", data_return["throughput"])
    return data_return
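# A quick illustration of the grid the comprehension above expands to.
# This is a minimal sketch, assuming a hypothetical scale_up_values
# section whose keys match the reads in run(); the numbers are invented.
s = {
    "cpus_min": 1, "cpus_max": 5, "cpus_incr": 1,  # vcpus 1, 2, 3, 4
    "mem_min": 1, "mem_max": 5, "mem_incr": 1,     # mem steps 1, 2, 3, 4
    "mem_unit": 1024,                              # MiB per mem step
}
scale_up_values = [
    (c, m * s["mem_unit"])
    for c in range(s["cpus_min"], s["cpus_max"], s["cpus_incr"])
    for m in range(s["mem_min"], s["mem_max"], s["mem_incr"])
]
print(scale_up_values[:3])  # [(1, 1024), (1, 2048), (1, 3072)]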
Example #2
def run(test_config):
    con_dic = test_config["load_manager"]
    test_num = con_dic['scenarios']['num_stack'].split(',')
    if test_config["contexts"]["yardstick_ip"] is None:
        con_dic["contexts"]["yardstick_ip"] =\
            conf_parser.ip_parser("yardstick_test_ip")

    if "dashboard" in test_config["contexts"].keys():
        if test_config["contexts"]["dashboard_ip"] is None:
            test_config["contexts"]["dashboard_ip"] =\
                conf_parser.ip_parser("dashboard")
        LOG.info("Create Dashboard data")
        DashBoard.posca_stress_ping(test_config["contexts"])

    env_pre(test_config)
    LOG.info("yardstick environment prepare done!")

    for value in test_num:
        result = []
        out_num = 0
        num = int(value)
        # pool = multiprocessing.Pool(processes=num)
        threadings = []
        LOG.info("begin to run %s thread" % num)

        starttime = datetime.datetime.now()

        for i in xrange(0, num):
            temp_thread = threading.Thread(target=func_run, args=(str(i), ))
            threadings.append(temp_thread)
            temp_thread.start()
        for one_thread in threadings:
            one_thread.join()
        while not q.empty():
            result.append(q.get())
        for item in result:
            out_num = out_num + float(item[0])

        endtime = datetime.datetime.now()
        LOG.info("%s thread success %d times" % (num, out_num))
        during_date = (endtime - starttime).seconds

        if out_num >= con_dic["scenarios"]['threshhold']:
            criteria_result = "PASS"
        else:
            criteria_result = "FAIL"

        data_reply = config_to_result(num, out_num, during_date,
                                      criteria_result)
        if "dashboard" in test_config["contexts"].keys():
            DashBoard.dashboard_send_data(test_config['contexts'], data_reply)
        conf_parser.result_to_file(data_reply, test_config["out_file"])

        if criteria_result == "FAIL":
            break
    LOG.info('END POSCA stress ping test')
    return criteria_result
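# The loop above depends on a module-level worker func_run and a shared
# queue q that are not shown. A minimal compatible sketch (hypothetical):
# each worker pushes a tuple whose first element is a success count, which
# matches the float(item[0]) read in run().
import Queue  # "queue" on Python 3

q = Queue.Queue()

def func_run(thread_id):
    success_count = 1  # stands in for a real yardstick ping run
    q.put((success_count, thread_id))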
Example #3
def run(con_dic):
    # can we specify these ranges from command line?
    low, high = con_dic['test_config']['num_vnfs']
    data = {
        "num_vnfs": range(low, high)
    }
    con_dic["result_file"] = os.path.dirname(
        os.path.abspath(__file__)) + "/test_case/result"
    pre_role_result = 1
    data_return = {}
    data_max = {}
    data_return["throughput"] = 1

    if con_dic["runner_config"]["yardstick_test_ip"] is None:
        con_dic["runner_config"]["yardstick_test_ip"] =\
            conf_parser.ip_parser("yardstick_test_ip")

    env_pre(con_dic)

    if con_dic["runner_config"]["dashboard"] == 'y':
        if con_dic["runner_config"]["dashboard_ip"] is None:
            con_dic["runner_config"]["dashboard_ip"] =\
                conf_parser.ip_parser("dashboard")
        LOG.info("Create Dashboard data")
        DashBoard.dashboard_system_bandwidth(con_dic["runner_config"])

    bandwidth_tmp = 1
    # scale up the number of VNFs step by step
    for num_vnfs in data["num_vnfs"]:
        data_max["throughput"] = 1
        test_config = {
            "num_vnfs": num_vnfs,
            "test_time": con_dic['test_config']['test_time']
        }
        data_reply = do_test(test_config, con_dic)
        conf_parser.result_to_file(data_reply, con_dic["out_file"])
        # TODO: figure out which KPI to use
        bandwidth = data_reply["throughput"]
        if data_max["throughput"] < bandwidth:
            data_max = data_reply
        if abs(bandwidth_tmp - bandwidth) / float(bandwidth_tmp) < 0.025:
            LOG.info("this group of data has reached top output")
            break
        else:
            pre_reply = data_reply
            bandwidth_tmp = bandwidth
        cur_role_result = float(pre_reply["throughput"])
        if (abs(pre_role_result - cur_role_result) /
                float(pre_role_result) < 0.025):
            LOG.info("The performance increases slowly")
        if data_return["throughput"] < data_max["throughput"]:
            data_return = data_max
        pre_role_result = cur_role_result
    LOG.info("Find bottlenecks of this config")
    LOG.info("The max data is %d", data_return["throughput"])
    return data_return
Example #4
def run(test_config):
    load_config = test_config["load_manager"]
    scenarios_conf = load_config["scenarios"]
    Use_Dashboard = False

    env_pre(None)
    if test_config["contexts"]["yardstick_ip"] is None:
        load_config["contexts"]["yardstick_ip"] =\
            conf_parser.ip_parser("yardstick_test_ip")

    if "dashboard" in test_config["contexts"].keys():
        if test_config["contexts"]["dashboard_ip"] is None:
            test_config["contexts"]["dashboard_ip"] =\
                conf_parser.ip_parser("dashboard")
        LOG.info("Create Dashboard data")
        Use_Dashboard = True

    cpus = conf_parser.str_to_list(scenarios_conf["cpus"])
    mems = conf_parser.str_to_list(scenarios_conf["mems"])
    pkt_size = conf_parser.str_to_list(scenarios_conf["pkt_size"])
    multistream = conf_parser.str_to_list(scenarios_conf["multistream"])
    search_interval = scenarios_conf["search_interval"]

    load_config["result_file"] = os.path.dirname(
        os.path.abspath(__file__)) + "/test_case/result"

    if len(cpus) != len(mems):
        LOG.error("the cpus and mems config lists are not the same length!")
        os._exit(1)

    result = []

    for i in range(0, len(cpus)):
        case_config = {
            "vcpu": cpus[i],
            "memory": int(mems[i]) * 1024,
            "multistreams": multistream,
            "pktsize": pkt_size,
            "search_interval": search_interval
        }

        data_reply = do_test(case_config, Use_Dashboard,
                             test_config["contexts"])
        result.append(data_reply)

    LOG.info("Finished bottlenecks testcase")
    LOG.info("The result data is %s", result)
    return result
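# conf_parser.str_to_list is assumed to split a comma-separated scenario
# string into a list; under that assumption the index-paired cpus/mems
# walk above behaves like this sketch (values invented, mems presumably
# in GB given the * 1024 scaling):
cpus = "1,2,4".split(",")
mems = "2,4,8".split(",")
assert len(cpus) == len(mems)
for vcpu, mem in zip(cpus, mems):
    print({"vcpu": vcpu, "memory": int(mem) * 1024})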
Example #5
def run(test_config):
    load_config = test_config["load_manager"]
    scenarios_conf = load_config["scenarios"]
    runner_conf = load_config["runners"]
    contexts_conf = test_config["contexts"]
    Use_Dashboard = False
    env_pre(test_config)
    if test_config["contexts"]["yardstick_ip"] is None:
        load_config["contexts"]["yardstick_ip"] =\
            conf_parser.ip_parser("yardstick_test_ip")

    if "dashboard" in test_config["contexts"].keys():
        if test_config["contexts"]["dashboard_ip"] is None:
            test_config["contexts"]["dashboard_ip"] =\
                conf_parser.ip_parser("dashboard")
        LOG.info("Create Dashboard data")
        Use_Dashboard = True
        DashBoard.posca_moon_init(test_config["contexts"])

    tenants_conf = conf_parser.str_to_list(scenarios_conf["tenants"])
    subject_number = int(scenarios_conf["subject_number"])
    object_number = int(scenarios_conf["object_number"])
    timeout = scenarios_conf["timeout"]
    consul_host = contexts_conf["moon_environment"]["ip"]
    consul_port = contexts_conf["moon_environment"]["consul_port"]

    load_config["result_file"] = os.path.dirname(
        os.path.abspath(__file__)) + "/test_case/result"

    result = []

    for tenants in tenants_conf:
        print(tenants)
        case_config = {"tenant_number": tenants,
                       "subject_number": subject_number,
                       "object_number": object_number,
                       "timeout": timeout,
                       "consul_host": consul_host,
                       "consul_port": consul_port}

        data_reply = do_test(runner_conf, case_config,
                             Use_Dashboard, test_config["contexts"])
        result.append(data_reply)

    LOG.info("Finished bottlenecks testcase")
    LOG.info("The result data is %s", result)
    return result
Example #6
def run(test_config):
    print(test_config)
    load_config = test_config["load_manager"]
    scenarios_conf = load_config["scenarios"]
    Use_Dashboard = False
    env_pre(test_config)
    if test_config["contexts"]["yardstick_ip"] is None:
        load_config["contexts"]["yardstick_ip"] =\
            conf_parser.ip_parser("yardstick_test_ip")

    if "dashboard" in test_config["contexts"].keys():
        if test_config["contexts"]["dashboard_ip"] is None:
            test_config["contexts"]["dashboard_ip"] =\
                conf_parser.ip_parser("dashboard")
        LOG.info("Create Dashboard data")
        Use_Dashboard = True

    num_vnfs = conf_parser.str_to_list(scenarios_conf["number_vnfs"])
    iterations = scenarios_conf["iterations"]
    interval = scenarios_conf["interval"]
    load_config["result_file"] = os.path.dirname(
        os.path.abspath(__file__)) + "/test_case/result"

    result = []

    for i in range(0, len(num_vnfs)):
        print(i)
        case_config = {
            "num_vnfs": int(num_vnfs[i]),
            "iterations": iterations,
            "interval": interval
        }
        data_reply = do_test(case_config, Use_Dashboard,
                             test_config["contexts"])
        result.append(data_reply)

    LOG.info("Finished bottlenecks testcase")
    LOG.info("The result data is %s", result)
    return result
Example #7
def run(test_config):
    con_dic = test_config["load_manager"]
    scenarios_conf = con_dic["scenarios"]

    if test_config["contexts"]["yardstick_ip"] is None:
        con_dic["contexts"]["yardstick_ip"] =\
            conf_parser.ip_parser("yardstick_test_ip")

    env_pre(test_config)
    LOG.info("yardstick environment prepare done!")

    test_num = conf_parser.str_to_list(scenarios_conf["num_stack"])
    rw = scenarios_conf["rw"]
    bs = scenarios_conf["bs"]
    size = scenarios_conf["size"]
    rwmixwrite = scenarios_conf["rwmixwrite"]
    numjobs = scenarios_conf["num_jobs"]
    direct = scenarios_conf["direct"]
    volume_num = scenarios_conf["volume_num"]
    volume_size = scenarios_conf["volume_size"]

    result = []

    for value in test_num:
        case_config = {
            "stack_num": int(value),
            "volume_num": volume_num,
            "rw": rw,
            "bs": bs,
            "size": size,
            "rwmixwrite": rwmixwrite,
            "numjobs": numjobs,
            "direct": direct,
            "volume_size": int(volume_size)
        }
        data_reply = do_test(case_config)
        result.append(data_reply)

        LOG.info("%s stack successful run" % (value))

        conf_parser.result_to_file(data_reply, test_config["out_file"])

    LOG.info('END POSCA stress multistack storage parallel testcase')
    LOG.info("The result data is %s", result)
    return result
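# The scenario keys read above (rw, bs, size, rwmixwrite, num_jobs,
# direct) track common fio job options. A hypothetical scenarios section
# this run() would accept; the values are examples only:
scenarios_conf = {
    "num_stack": "1,2",    # stack counts to try, comma separated
    "rw": "randrw",        # fio read/write mode
    "bs": "4k",            # block size
    "size": "1g",          # file size per job
    "rwmixwrite": "30",    # write percentage in a mixed workload
    "num_jobs": "4",       # concurrent fio jobs
    "direct": "1",         # bypass the page cache
    "volume_num": "1",
    "volume_size": "10",
}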
Example #8
def run(test_config):
    con_dic = test_config["load_manager"]
    Use_Dashboard = False
    env_pre(None)
    if test_config["contexts"]["yardstick_ip"] is None:
        con_dic["contexts"]["yardstick_ip"] =\
            conf_parser.ip_parser("yardstick_test_ip")

    if "dashboard" in test_config["contexts"].keys():
        if test_config["contexts"]["dashboard_ip"] is None:
            test_config["contexts"]["dashboard_ip"] =\
                conf_parser.ip_parser("dashboard")
        LOG.info("Create Dashboard data")
        Use_Dashboard = True
        DashBoard.dashboard_system_bandwidth(test_config["contexts"])

    data = {}
    rx_pkt_a = con_dic['scenarios']['rx_pkt_sizes'].split(',')
    tx_pkt_a = con_dic['scenarios']['tx_pkt_sizes'].split(',')
    data["rx_pkt_sizes"] = rx_pkt_a
    data["tx_pkt_sizes"] = tx_pkt_a
    con_dic["result_file"] = os.path.dirname(
        os.path.abspath(__file__)) + "/test_case/result"
    cur_role_result = 1
    pre_role_result = 1
    pre_reply = {}
    data_return = {}
    data_max = {}
    data_return["throughput"] = 1

    for test_x in data["tx_pkt_sizes"]:
        data_max["throughput"] = 1
        bandwidth_tmp = 1
        for test_y in data["rx_pkt_sizes"]:
            case_config = {
                "tx_msg_size": float(test_x),
                "rx_msg_size": float(test_y),
                "test_time": con_dic['scenarios']['test_times'],
                "pod_info": conf_parser.bottlenecks_config["pod_info"]
            }
            data_reply = do_test(case_config, Use_Dashboard,
                                 test_config["contexts"])

            conf_parser.result_to_file(data_reply, test_config["out_file"])
            bandwidth = data_reply["throughput"]
            if data_max["throughput"] < bandwidth:
                data_max = data_reply
            if abs(bandwidth_tmp - bandwidth) / float(bandwidth_tmp) < 0.025:
                LOG.info("this group of data has reached top output")
                break
            else:
                pre_reply = data_reply
                bandwidth_tmp = bandwidth
        cur_role_result = float(pre_reply["throughput"])
        if (abs(pre_role_result - cur_role_result) /
                float(pre_role_result) < 0.025):
            LOG.info("The performance increases slowly")
        if data_return["throughput"] < data_max["throughput"]:
            data_return = data_max
        pre_role_result = cur_role_result
    LOG.info("Find bottlenecks of this config")
    LOG.info("The max data is %d", data_return["throughput"])
    return data_return
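# The 2.5% relative-change early exit repeated in these examples could be
# factored into one helper; a sketch:
def reached_plateau(prev, cur, tol=0.025):
    # True when throughput moved by less than tol relative to the
    # previous sample, i.e. the curve has flattened out.
    return abs(prev - cur) / float(prev) < tol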
Example #9
def run(test_config):
    con_dic = test_config["load_manager"]
    scenarios_conf = con_dic["scenarios"]

    if test_config["contexts"]["yardstick_ip"] is None:
        con_dic["contexts"]["yardstick_ip"] =\
            conf_parser.ip_parser("yardstick_test_ip")

    env_pre(test_config)
    LOG.info("yardstick environment prepare done!")

    stack_num = scenarios_conf["num_stack"]
    test_num = conf_parser.str_to_list(scenarios_conf["num_thread"])
    rw = scenarios_conf["rw"]
    bs = scenarios_conf["bs"]
    size = scenarios_conf["size"]
    rwmixwrite = scenarios_conf["rwmixwrite"]
    numjobs = scenarios_conf["num_jobs"]
    direct = scenarios_conf["direct"]
    volume_num = scenarios_conf["volume_num"]
    volume_size = scenarios_conf["volume_size"]

    for value in test_num:
        result = []
        out_num = 0
        num = int(value)
        # pool = multiprocessing.Pool(processes=num)
        threadings = []
        LOG.info("begin to run %s thread" % num)

        starttime = datetime.datetime.now()

        for i in xrange(0, num):
            case_config = {
                "stack_num": int(stack_num),
                "volume_num": volume_num,
                "rw": rw,
                "bs": bs,
                "size": size,
                "rwmixwrite": rwmixwrite,
                "numjobs": numjobs,
                "direct": direct,
                "volume_size": int(volume_size)
            }
            tmp_thread = threading.Thread(target=func_run,
                                          args=(case_config, ))
            threadings.append(tmp_thread)
            tmp_thread.start()

        for one_thread in threadings:
            one_thread.join()
        while not q.empty():
            result.append(q.get())
        for item in result:
            out_num = out_num + float(item)

        print(result)

        endtime = datetime.datetime.now()
        LOG.info("%s thread success %d times" % (num, out_num))
        during_date = (endtime - starttime).seconds

        data_reply = config_to_result(num, out_num, during_date)
        conf_parser.result_to_file(data_reply, test_config["out_file"])

    LOG.info('END POSCA stress multistack storage test')
    return data_reply
Example #10
def run(test_config):
    load_config = test_config["load_manager"]
    scenarios_conf = load_config["scenarios"]
    contexts_conf = test_config["contexts"]
    runner_conf = load_config["runners"]
    Use_Dashboard = False

    env_pre(test_config)
    if test_config["contexts"]["yardstick_ip"] is None:
        load_config["contexts"]["yardstick_ip"] =\
            conf_parser.ip_parser("yardstick_test_ip")

    if "dashboard" in test_config["contexts"].keys():
        if test_config["contexts"]["dashboard_ip"] is None:
            test_config["contexts"]["dashboard_ip"] =\
                conf_parser.ip_parser("dashboard")
        LOG.info("Create Dashboard data")
        Use_Dashboard = True
        DashBoard.posca_moon_init(test_config["contexts"])

    subject_number = int(scenarios_conf["subject_number"])
    object_number = int(scenarios_conf["object_number"])
    timeout = scenarios_conf["timeout"]
    consul_host = contexts_conf["moon_environment"]["ip"]
    consul_port = contexts_conf["moon_environment"]["consul_port"]

    initial = scenarios_conf["initial_tenants"]
    threshold = scenarios_conf["steps_tenants"]
    tolerate_time = scenarios_conf["tolerate_time"]
    case_config = {"subject_number": subject_number,
                   "object_number": object_number,
                   "timeout": timeout,
                   "consul_host": consul_host,
                   "consul_port": consul_port}

    process_queue = Queue.Queue()

    load_config["result_file"] = os.path.dirname(
        os.path.abspath(__file__)) + "/test_case/result"

    result = 0

    if initial == 0:
        tenant_number = threshold
    else:
        tenant_number = initial
    while switch.value == 0:
        LOG.info("Start %d process", tenant_number)
        for tenant in range(0, tenant_number):
            process = multiprocessing.Process(target=do_test,
                                              args=(runner_conf,
                                                    case_config,
                                                    Use_Dashboard,
                                                    test_config["contexts"],
                                                    ))
            process.start()
            process_queue.put(process)

        result = result + tenant_number
        tenant_number = threshold
        time.sleep(tolerate_time)

    while process_queue.qsize():
        process = process_queue.get()
        process.terminate()

    if result == initial:
        result = 0
    else:
        result = result - threshold

    testdate = {"tenant_max": result}
    testresult = config_to_result(testdate)
    LOG.info("Finished bottlenecks testcase")
    LOG.info("The result data is %d", result)
    if Use_Dashboard:
        print("Use Dashboard")
        DashBoard.dashboard_send_data(test_config["contexts"], testresult)

    return testresult
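# The ramp-up loop spins on a module-level switch that is not shown here.
# Assuming it is a shared flag that a monitor flips once the system under
# test stops answering, it could be declared like this (hypothetical):
import multiprocessing

switch = multiprocessing.Value('i', 0)  # a monitor sets switch.value = 1 to stop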