Example #1
def fake_deco_by_loops_ffu(loops, func, *args, **kw):
    i = 1
    loops = int(loops)
    while i <= loops:
        tc_logger.info("***** {} *****".format(i))
        set_up(level='loop', loop=i)
        if test_conf["event_trace"] is True:
            command = "{} {} > {} &".format(
                test_conf["event_trace_home"] + "/" + "trace_enable.sh",
                test_conf["device_id"],
                test_conf["log_home"] + "/" + "event_trace_" + str(i) + ".log")
            tc_logger.info(
                "Enable event_trace with command: {}".format(command))
            os.system(command)
        func(*args, **kw)
        tear_down(level='loop', loop=i)
        if test_conf["event_trace"] is True:
            command = "{} {}".format(
                test_conf["event_trace_home"] + "/" + "trace_disable.sh",
                test_conf["device_id"])
            tc_logger.info(
                "Disable event_trace with command: {}".format(command))
            os.system(command)
        if os.path.exists(test_conf["graceful_stop_point"]):
            tc_logger.info("File exists: " + test_conf["graceful_stop_point"])
            tc_logger.info("Exit loop before duration threshold")
            break
        i += 1
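For illustration, a minimal call sketch for this loop runner. The workload function run_fio_once is hypothetical, and test_conf, set_up and tear_down are assumed to be configured by the harness:

# Hypothetical workload; any callable and its arguments can be passed through.
def run_fio_once(block_size, rw):
    tc_logger.info("fio run: bs={}, rw={}".format(block_size, rw))

# Runs the workload 5 times, with per-loop set_up/tear_down and optional event tracing.
fake_deco_by_loops_ffu(5, run_fio_once, block_size="512k", rw="read")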
Example #2
def fake_deco_by_loops(loops, result_file, func, *args, **kw):
    i = 1
    loops = int(loops)
    while i <= loops:
        tc_logger.info("***** {} *****".format(i))
        set_up(level='loop', loop=i)
        if test_conf["event_trace"] is True:
            command = "{} {} > {} &".format(
                test_conf["event_trace_home"] + "/" + "trace_enable.sh",
                test_conf["device_id"],
                test_conf["log_home"] + "/" + "event_trace_" + str(i) + ".log")
            tc_logger.info(
                "Enable event_trace with command: {}".format(command))
            os.system(command)
        result_header, result_value = func(*args, **kw)
        tear_down(level='loop', loop=i)
        if test_conf["event_trace"] is True:
            command = "{} {}".format(
                test_conf["event_trace_home"] + "/" + "trace_disable.sh",
                test_conf["device_id"])
            tc_logger.info(
                "Disable event_trace with command: {}".format(command))
            os.system(command)
        result_header_copy = copy.deepcopy(result_header)
        result_header_copy.insert(0, "Loop")
        result_value.insert(0, str(i))
        write_csv_header(result_file, result_header_copy)
        write_csv_result(result_file, result_value)
        if os.path.exists(test_conf["graceful_stop_point"]):
            tc_logger.info("File exists: " + test_conf["graceful_stop_point"])
            tc_logger.info("Exit loop before duration threshold")
            break
        i += 1
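Unlike Example #1, this variant expects the workload to return a (header, values) pair and appends each loop's values to a CSV file with a leading "Loop" column. A minimal sketch, assuming a hypothetical workload and a writable result file:

# Hypothetical workload returning (header, values), as fake_deco_by_loops expects.
def run_benchmark_once():
    return ["Read (MB/s)", "Write (MB/s)"], ["512.3", "401.7"]

fake_deco_by_loops(3, "result.csv", run_benchmark_once)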
Example #3
    def main():
        initiate_device()
        initiate_file()
        keep_monitor()

        # config tc_logger to print in log.txt
        main_log = os.path.join(test_conf['log_home'], 'log.txt')
        fh = logging.FileHandler(main_log)
        fh.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            '%(asctime)s - %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        tc_logger.addHandler(fh)

        # import module and function
        set_up(level='case')
        module = import_module(test_conf['module'])
        func = getattr(module, test_conf['func'])
        try:
            func()
        except Exception as e:
            raise e
        finally:
            if test_conf['monitor'] is True:
                csv_files = [[
                    'monitor', test_conf['monitor_dir'] + '/monitor.csv'
                ]]
                convert_csv_files_to_json_file(csv_files)
            tear_down(level='case')
            if test_conf.get("ddtf") is not None:
                tc_logger.removeHandler(fh)
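main() resolves the test case to run from configuration via import_module and getattr. A sketch of the two keys it reads (the values shown are hypothetical):

test_conf['module'] = 'testcases.fio_cases'  # dotted module path passed to import_module
test_conf['func'] = 'tc2_fio_sr_stress'      # attribute name resolved with getattr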
Example #4
def tc2_fio_sr_stress():
    # pre_case, post_case, pre_loop and post_loop definition
    tc_logger.info("Defining pre_case, post_case, pre_loop and post_loop inside of test case")

    test_conf["ud_pre_case"] = ud_pre_case_string
    test_conf["ud_post_case"] = ud_post_case_string
    test_conf["ud_pre_loop"] = ud_pre_loop_string
    test_conf["ud_post_loop"] = ud_post_loop_string

    # duration configuration
    if "duration" in test_conf:
        duration = test_conf["duration"]
    else:
        duration = 300

    # main function
    set_up(level='case')
    func = tc_run_fio_by_duration
    func(block_size='512k', duration=duration, file_size='10g', runtime='600', iodepth='32', rw='read')

    result = 0
    # result validation
    statistic_file = os.path.join(test_conf["result_home"], "statistics.csv")
    statistics_column = "Read (MB/s)"
    benchmark_file = _get_basic_stress_bm_filename()
    benchmark_group = ["FIO", "basic", "SR"]
    checkpoint_prefix = "Sequential Read"
    result = stress_statistics_validation(statistic_file, statistics_column, benchmark_file,
                                          benchmark_group, checkpoint_prefix) | result

    return result
Example #5
def FIO_4G_SR_Storage_Down_Perf():
    # pre_case, post_case, pre_loop and post_loop definition
    tc_logger.info("Defining pre_case, post_case, pre_loop and post_loop inside of test case")

    test_conf["ud_pre_case"] = ud_pre_case_string
    test_conf["ud_post_case"] = ud_post_case_string
    test_conf["ud_pre_loop"] = ud_pre_loop_string
    test_conf["ud_post_loop"] = ud_post_loop_string

    # loops configuration
    test_conf["sub_jobs"] = _sub_jobs
    test_conf["file_size"] = _file_size[1]
    _loops = int(device.auto_calculate_loops()[1][0])

    # main function
    set_up(level='case')
    func = tc_run_fio_by_loops.__wrapped__
    func(iodepth=32, rw="read", block_size="512k", runtime=600, rewrite="false", loops=_loops)

    # performance result verification
    benchmark_item = get_benchmark_item(_get_perf_bm_filename(), ["FIO", "FIO_4G_Storage_Down_Perf"])
    tc_logger.info("Benchmark is as below")
    tc_logger.info(str(benchmark_item))
    result_file = os.path.join(test_conf["result_home"], "fio_rpt.csv")
    result = 0

    # SeqRead verification
    values = get_column_from_csv(result_file, "Read (MB/s)")
    values = values[:-1]
    checkpoints = ["Sequential Read(MB/s)" + " - " + str(i + 1) for i in range(len(values))]
    result = assert_values_meet_benchmark(values, benchmark_item["SR"], False, "dc.yaml", checkpoints, True) | result
    return result
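This case calls tc_run_fio_by_loops.__wrapped__ to bypass the looping decorator and drive the undecorated function directly, since set_up(level='case') has already been done by hand. A self-contained sketch of the __wrapped__ attribute that functools.wraps attaches, using a simplified stand-in decorator:

import functools

def by_loops(func):
    @functools.wraps(func)  # stores the original function as wrapper.__wrapped__
    def wrapper(*args, **kw):
        for _ in range(int(kw.pop("loops", 1))):
            func(*args, **kw)
    return wrapper

@by_loops
def run_fio(**kw):
    print("fio", kw)

run_fio(loops=3, rw="read")     # decorated: runs three times
run_fio.__wrapped__(rw="read")  # original function: one run, no loop handling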
Example #6
def tc2_IOZone_basic_Rand_3times():
    # pre_case, post_case, pre_loop and post_loop definition
    tc_logger.info(
        "Defining pre_case, post_case, pre_loop and post_loop inside of test case"
    )

    test_conf["ud_pre_case"] = ud_pre_case_string
    test_conf["ud_post_case"] = ud_post_case_string
    test_conf["ud_pre_loop"] = ud_pre_loop_string
    test_conf["ud_post_loop"] = ud_post_loop_string

    # main function
    set_up(level='case')
    func = tc_run_iozone_by_loops.__wrapped__
    func(threads=8,
         file_size="128m",
         block_size="4k",
         sequential=False,
         loops=3)

    # performance result verification
    benchmark_item = get_benchmark_item(_get_basic_perf_bm_filename(),
                                        ["IOZone", "basic"])
    tc_logger.info("Benchmark is as below")
    tc_logger.info(str(benchmark_item))
    result_file = os.path.join(test_conf["result_home"], "iozone_result.csv")
    result = 0
    # Random Read verification
    checkpoints_prefix = "Random readers(MB/s)"
    values = get_column_from_csv(result_file, checkpoints_prefix)
    values = values[-1:] + values[:-1]
    checkpoints = [
        checkpoints_prefix + " - " + str(i) for i in range(1, len(values))
    ]
    checkpoints.insert(0, checkpoints_prefix + " - avg")
    result = assert_values_meet_benchmark(values, benchmark_item["RR"], False,
                                          "dc.yaml", checkpoints,
                                          True) | result
    # Random Write verification
    checkpoints_prefix = "Random writers(MB/s)"
    values = get_column_from_csv(result_file, checkpoints_prefix)
    values = values[-1:] + values[:-1]
    checkpoints = [
        checkpoints_prefix + " - " + str(i) for i in range(1, len(values))
    ]
    checkpoints.insert(0, checkpoints_prefix + " - avg")
    result = assert_values_meet_benchmark(values, benchmark_item["RW"], False,
                                          "dc.yaml", checkpoints,
                                          True) | result

    # return result
    return result
Example #7
def tc2_FIO_basic_RandRW73_3times():
    # pre_case, post_case, pre_loop and post_loop definition
    tc_logger.info(
        "Defining pre_case, post_case, pre_loop and post_loop inside of test case"
    )

    test_conf["ud_pre_case"] = ud_pre_case_string
    test_conf["ud_post_case"] = ud_post_case_string
    test_conf["ud_pre_loop"] = ud_pre_loop_string
    test_conf["ud_post_loop"] = ud_post_loop_string

    # main function
    set_up(level='case')
    func = tc_run_fio_by_loops.__wrapped__
    func(iodepth=32,
         rw="randrw",
         rwmixread="70",
         block_size="4k",
         file_size="1g",
         runtime=600,
         loops=3)

    # performance result verification
    benchmark_item = get_benchmark_item(_get_basic_perf_bm_filename(),
                                        ["FIO", "basic_randrw73"])
    tc_logger.info("Benchmark is as below")
    tc_logger.info(str(benchmark_item))
    result_file = os.path.join(test_conf["result_home"], "fio_rpt.csv")
    result = 0
    # Random Read verification
    values = get_column_from_csv(result_file, "Read (MB/s)")
    values = values[-1:] + values[:-1]
    checkpoints = [
        "Random Read(MB/s)" + " - " + str(i) for i in range(1, len(values))
    ]
    checkpoints.insert(0, "Random Read(MB/s)" + " - avg")
    result = assert_values_meet_benchmark(values, benchmark_item["RR"], False,
                                          "dc.yaml", checkpoints,
                                          True) | result
    # Random Write verification
    values = get_column_from_csv(result_file, "Write (MB/s)")
    values = values[-1:] + values[:-1]
    checkpoints = [
        "Random Write(MB/s)" + " - " + str(i) for i in range(1, len(values))
    ]
    checkpoints.insert(0, "Random Write(MB/s)" + " - avg")
    result = assert_values_meet_benchmark(values, benchmark_item["RW"], False,
                                          "dc.yaml", checkpoints,
                                          True) | result

    # return result
    return result
Example #8
def tc2_ab_default_stress():
    # pre_case, post_case, pre_loop and post_loop definition
    tc_logger.info("Defining pre_case, post_case, pre_loop and post_loop inside of test case")

    test_conf["ud_pre_case"] = ud_pre_case_string
    test_conf["ud_post_case"] = ud_post_case_string
    test_conf["ud_pre_loop"] = ud_pre_loop_string
    test_conf["ud_post_loop"] = ud_post_loop_string

    # duration configuration
    if "duration" in test_conf:
        duration = test_conf["duration"]
    else:
        duration = 300

    # main function
    set_up(level='case')
    func = tc_run_micro_by_duration
    func(duration=duration)

    result = 0
    # result validation - Sequential Read
    statistic_file = os.path.join(test_conf["result_home"], "statistics.csv")
    statistics_column = "Sequential Read(MB/s)"
    benchmark_file = _get_basic_stress_bm_filename()
    benchmark_group = ["AB", "default", "SR"]
    checkpoint_prefix = "Sequential Read"
    result = stress_statistics_validation(statistic_file, statistics_column, benchmark_file,
                                          benchmark_group, checkpoint_prefix) | result

    # result validation - Sequential Write
    statistics_column = "Sequential Write(MB/s)"
    benchmark_group = ["AB", "default", "SW"]
    checkpoint_prefix = "Sequential Write"
    result = stress_statistics_validation(statistic_file, statistics_column, benchmark_file,
                                          benchmark_group, checkpoint_prefix) | result

    # result validation - Random Read
    statistics_column = "Random Read(MB/s)"
    benchmark_group = ["AB", "default", "RR"]
    checkpoint_prefix = "Random Read"
    result = stress_statistics_validation(statistic_file, statistics_column, benchmark_file,
                                          benchmark_group, checkpoint_prefix) | result

    # result validation - Random Write
    statistics_column = "Random Write(MB/s)"
    benchmark_group = ["AB", "default", "RW"]
    checkpoint_prefix = "Random Write"
    result = stress_statistics_validation(statistic_file, statistics_column, benchmark_file,
                                          benchmark_group, checkpoint_prefix) | result

    return result
Example #9
def tc2_iozone_sequential_stress():
    # pre_case, post_case, pre_loop and post_loop definition
    tc_logger.info("Defining pre_case, post_case, pre_loop and post_loop inside of test case")

    test_conf["ud_pre_case"] = ud_pre_case_string
    test_conf["ud_post_case"] = ud_post_case_string
    test_conf["ud_pre_loop"] = ud_pre_loop_string
    test_conf["ud_post_loop"] = ud_post_loop_string

    # duration configuration
    if "duration" in test_conf:
        duration = test_conf["duration"]
    else:
        duration = 300

    # main function
    set_up(level='case')
    func = tc_run_iozone_by_duration
    func(block_size='512k', duration=duration, file_size='128m', sequential='True', threads='8')

    result = 0
    # result validation - Initial Writer
    statistic_file = os.path.join(test_conf["result_home"], "statistics.csv")
    statistics_column = "Initial Writers(MB/s)"
    benchmark_file = _get_basic_stress_bm_filename()
    benchmark_group = ["IOZone", "basic", "SW"]
    checkpoint_prefix = "Initial Writers"
    result = stress_statistics_validation(statistic_file, statistics_column, benchmark_file,
                                          benchmark_group, checkpoint_prefix) | result

    # result validation - Rewriters
    statistics_column = "Rewriters(MB/s)"
    benchmark_group = ["IOZone", "basic", "SW"]
    checkpoint_prefix = "Rewriters"
    result = stress_statistics_validation(statistic_file, statistics_column, benchmark_file,
                                          benchmark_group, checkpoint_prefix) | result

    # result validation - Reader
    statistics_column = "Readers(MB/s)"
    benchmark_group = ["IOZone", "basic", "SR"]
    checkpoint_prefix = "Readers"
    result = stress_statistics_validation(statistic_file, statistics_column, benchmark_file,
                                          benchmark_group, checkpoint_prefix) | result

    # result validation - Re-Reader
    statistics_column = "Re-readers(MB/s)"
    benchmark_group = ["IOZone", "basic", "SR"]
    checkpoint_prefix = "Re-readers"
    result = stress_statistics_validation(statistic_file, statistics_column, benchmark_file,
                                          benchmark_group, checkpoint_prefix) | result

    return result
Example #10
def FIO_4G_Restore_No_enough_SW_3times():
    # pre_case, post_case, pre_loop and post_loop definition
    tc_logger.info(
        "Defining pre_case, post_case, pre_loop and post_loop inside of test case"
    )

    test_conf["ud_pre_case"] = ud_pre_case_string
    test_conf["ud_post_case"] = ud_post_case_string
    test_conf["ud_pre_loop"] = ud_pre_loop_string
    test_conf["ud_post_loop"] = ud_post_loop_string

    # main function
    set_up(level='case')
    func = tc_run_fio_by_loops.__wrapped__
    func(iodepth=32,
         sub_jobs=4,
         rw="write",
         rewrite="false",
         block_size="512k",
         file_size="1g",
         runtime=600,
         loops=3)

    # performance result verification
    benchmark_item = get_benchmark_item(_get_basic_perf_bm_filename(),
                                        ["FIO", "FIO_4G_Restore_No_enough"])
    tc_logger.info("Benchmark is as below")
    tc_logger.info(str(benchmark_item))
    result_file = os.path.join(test_conf["result_home"], "fio_rpt.csv")
    result = 0
    # SeqWrite verification
    values = get_column_from_csv(result_file, "Write (MB/s)")
    values = values[-1:] + values[:-1]
    checkpoints = [
        "Sequential Write(MB/s)" + " - " + str(i)
        for i in range(1, len(values))
    ]
    checkpoints.insert(0, "Sequential Write(MB/s)" + " - avg")
    result = assert_values_meet_benchmark(values, benchmark_item["SW"], False,
                                          "dc.yaml", checkpoints,
                                          True) | result
    return result
Example #11
def fake_deco_by_duration(duration, func, *args, **kw):
    duration = int(duration)
    main_start_time = int(time.time())
    main_current_time = main_start_time

    i = 0
    while (main_current_time - main_start_time) < duration:
        i += 1
        tc_logger.info("***** {} *****".format(i))
        set_up(level='loop', loop=i)

        if test_conf["event_trace"] is True:
            command = "{} {} > {} &".format(
                test_conf["event_trace_home"] + "/" + "trace_enable.sh",
                test_conf["device_id"],
                test_conf["log_home"] + "/" + "event_trace_" + str(i) + ".log")
            tc_logger.info(
                "Enable event_trace with command: {}".format(command))
            os.system(command)
        func(*args, **kw)
        tear_down(level='loop', loop=i)
        if test_conf["event_trace"] is True:
            command = "{} {}".format(
                test_conf["event_trace_home"] + "/" + "trace_disable.sh",
                test_conf["device_id"])
            tc_logger.info(
                "Disable event_trace with command: {}".format(command))
            os.system(command)
        main_current_time = int(time.time())
        if os.path.exists(test_conf["graceful_stop_point"]):
            tc_logger.info("File exists: " + test_conf["graceful_stop_point"])
            tc_logger.info("Exit loop before duration threshold")
            break

    tc_logger.info("Main Logic End Time = " +
                   time.strftime("%Y-%m-%d  %H:%M:%S"))
    tc_logger.info("Main Logic End Time = " + str(int(time.time())))
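A minimal call sketch for the duration-based runner (the workload function is hypothetical): iterations repeat until the elapsed time exceeds the threshold or the graceful-stop file appears.

# Hypothetical workload; repeated until roughly 600 seconds have elapsed.
def run_stress_once():
    tc_logger.info("one stress iteration")

fake_deco_by_duration(600, run_stress_once)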
Example #12
def tc2_AB_default_3times():
    # pre_case, post_case, pre_loop and post_loop definition
    tc_logger.info(
        "Defining pre_case, post_case, pre_loop and post_loop inside of test case"
    )

    test_conf["ud_pre_case"] = ud_pre_case_string
    test_conf["ud_post_case"] = ud_post_case_string
    test_conf["ud_pre_loop"] = ud_pre_loop_string
    test_conf["ud_post_loop"] = ud_post_loop_string

    # main function
    set_up(level='case')
    # tc_run_micro_by_loops(loops=3)
    func = tc_run_micro_by_loops.__wrapped__
    func(loops=3)

    # performance result verification
    benchmark_item = get_benchmark_item(_get_basic_perf_bm_filename(),
                                        ["AB", "default"])
    tc_logger.info("Benchmark is as below")
    tc_logger.info(str(benchmark_item))
    result_file = os.path.join(test_conf["result_home"],
                               "androbench_result.csv")
    result = 0
    # SR verification
    checkpoints_prefix = "Sequential Read(MB/s)"
    values = get_column_from_csv(result_file, checkpoints_prefix)
    values = values[-1:] + values[:-1]
    checkpoints = [
        checkpoints_prefix + " - " + str(i) for i in range(1, len(values))
    ]
    checkpoints.insert(0, checkpoints_prefix + " - avg")
    result = assert_values_meet_benchmark(values, benchmark_item["SR"], False,
                                          "dc.yaml", checkpoints,
                                          True) | result
    # SW verification
    checkpoints_prefix = "Sequential Write(MB/s)"
    values = get_column_from_csv(result_file, checkpoints_prefix)
    values = values[-1:] + values[:-1]
    checkpoints = [
        checkpoints_prefix + " - " + str(i) for i in range(1, len(values))
    ]
    checkpoints.insert(0, checkpoints_prefix + " - avg")
    result = assert_values_meet_benchmark(values, benchmark_item["SW"], False,
                                          "dc.yaml", checkpoints,
                                          True) | result
    # RR verification
    checkpoints_prefix = "Random Read(MB/s)"
    values = get_column_from_csv(result_file, checkpoints_prefix)
    values = values[-1:] + values[:-1]
    checkpoints = [
        checkpoints_prefix + " - " + str(i) for i in range(1, len(values))
    ]
    checkpoints.insert(0, checkpoints_prefix + " - avg")
    result = assert_values_meet_benchmark(values, benchmark_item["RR"], False,
                                          "dc.yaml", checkpoints,
                                          True) | result
    # RW verification
    checkpoints_prefix = "Random Write(MB/s)"
    values = get_column_from_csv(result_file, checkpoints_prefix)
    values = values[-1:] + values[:-1]
    checkpoints = [
        checkpoints_prefix + " - " + str(i) for i in range(1, len(values))
    ]
    checkpoints.insert(0, checkpoints_prefix + " - avg")
    result = assert_values_meet_benchmark(values, benchmark_item["RW"], False,
                                          "dc.yaml", checkpoints,
                                          True) | result

    # return result
    return result
Example #13
def IOzone_Seq_4G_Restore_No_enough_3times():
    # pre_case, post_case, pre_loop and post_loop definition
    tc_logger.info(
        "Defining pre_case, post_case, pre_loop and post_loop inside of test case"
    )

    ud_pre_loop_string.extend([
        "file_for_assistant_test:rw=write@bs=512k@size=4g@runtime=600@fio_fg=True",
        "wb_avail_buf_restore_loop_check_no_enough"
    ])
    ud_post_loop_string.remove("wb_avail_buf_restore_loop_check_no_enough")
    test_conf["ud_pre_case"] = ud_pre_case_string
    test_conf["ud_post_case"] = ud_post_case_string
    test_conf["ud_pre_loop"] = ud_pre_loop_string
    test_conf["ud_post_loop"] = ud_post_loop_string

    # main function
    set_up(level='case')
    func = tc_run_iozone_by_loops.__wrapped__
    func(threads=4,
         file_size="1024m",
         block_size="512k",
         sequential=True,
         loops=3)

    # performance result verification
    benchmark_item = get_benchmark_item(
        _get_basic_perf_bm_filename(),
        ["IOZone", "IOzone_Seq_4G_Restore_No_enough"])
    tc_logger.info("Benchmark is as below")
    tc_logger.info(str(benchmark_item))
    result_file = os.path.join(test_conf["result_home"], "iozone_result.csv")
    result = 0
    # Initial Write verification
    checkpoints_prefix = "Initial Writers(MB/s)"
    values = get_column_from_csv(result_file, checkpoints_prefix)
    values = values[-1:] + values[:-1]
    checkpoints = [
        checkpoints_prefix + " - " + str(i) for i in range(1, len(values))
    ]
    checkpoints.insert(0, checkpoints_prefix + " - avg")
    result = assert_values_meet_benchmark(values, benchmark_item["SW_Initial"],
                                          False, "dc.yaml", checkpoints,
                                          True) | result
    # Rewrite verification
    checkpoints_prefix = "Rewriters(MB/s)"
    values = get_column_from_csv(result_file, checkpoints_prefix)
    values = values[-1:] + values[:-1]
    checkpoints = [
        checkpoints_prefix + " - " + str(i) for i in range(1, len(values))
    ]
    checkpoints.insert(0, checkpoints_prefix + " - avg")
    result = assert_values_meet_benchmark(
        values, benchmark_item["SW_Rewriters"], False, "dc.yaml", checkpoints,
        True) | result
    # Read verification
    checkpoints_prefix = "Readers(MB/s)"
    values = get_column_from_csv(result_file, checkpoints_prefix)
    values = values[-1:] + values[:-1]
    checkpoints = [
        checkpoints_prefix + " - " + str(i) for i in range(1, len(values))
    ]
    checkpoints.insert(0, checkpoints_prefix + " - avg")
    result = assert_values_meet_benchmark(values, benchmark_item["SR_Readers"],
                                          False, "dc.yaml", checkpoints,
                                          True) | result
    # Reread verification
    checkpoints_prefix = "Re-readers(MB/s)"
    values = get_column_from_csv(result_file, checkpoints_prefix)
    values = values[-1:] + values[:-1]
    checkpoints = [
        checkpoints_prefix + " - " + str(i) for i in range(1, len(values))
    ]
    checkpoints.insert(0, checkpoints_prefix + " - avg")
    result = assert_values_meet_benchmark(
        values, benchmark_item["SR_Re-readers"], False, "dc.yaml", checkpoints,
        True) | result
    return result
Example #14
def tc2_wb_function_sample():
    # test data and benchmark definition according to host environment
    result = 0
    if test_conf["chip_capacity"] == "256G":
        test_data = {"fio_file_size": "25G"}
        test_benchmark = {
            "fio_sw_time": {
                "min": 20,
                "max": 60,
                "comments": ["Initial Version"]
            },
            "abs_min": {
                "min": 0,
                "max": 0,
                "comments": ["available buffer size should be used up"]
            },
            "abs_max": {
                "min":
                100,
                "max":
                100,
                "comments": [
                    "available buffer size should be able to recover to A within limited time"
                ]
            },
            "abs_recover_time": {
                "min": 0,
                "max": 600,
                "comments": ["Initial Version"]
            },
            "flush_status_after_recover_1": {
                "min": 3,
                "max": 3,
                "comments":
                ["flush status should be set to 3 after abs recovered"]
            },
            "flush_status_after_recover_2": {
                "min":
                0,
                "max":
                0,
                "comments": [
                    "flush status should be set to 0 after abs recovered and status read"
                ]
            }
        }
    elif test_conf["chip_capacity"] == "128G":
        test_data = {"fio_file_size": "13G"}
        test_benchmark = {
            "fio_sw_time": {
                "min": 10,
                "max": 30,
                "comments": ["Initial Version"]
            },
            "abs_min": {
                "min": 0,
                "max": 0,
                "comments": ["available buffer size should be used up"]
            },
            "abs_max": {
                "min":
                100,
                "max":
                100,
                "comments": [
                    "available buffer size should be able to recover to A within limited time"
                ]
            },
            "abs_recover_time": {
                "min": 0,
                "max": 600,
                "comments": ["Initial Version"]
            },
            "flush_status_after_recover_1": {
                "min": 3,
                "max": 3,
                "comments":
                ["flush status should be set to 3 after abs recovered"]
            },
            "flush_status_after_recover_2": {
                "min":
                0,
                "max":
                0,
                "comments": [
                    "flush status should be set to 0 after abs recovered and status read"
                ]
            }
        }
    else:
        raise Exception("Unsupported chip capacity: " +
                        test_conf["chip_capacity"])

    # pre_case, post_case, pre_loop and post_loop definition
    tc_logger.info(
        "Defining pre_case, post_case, pre_loop and post_loop inside of test case"
    )
    test_conf["ud_pre_case"] = ud_pre_case_string
    test_conf["ud_post_case"] = ud_post_case_string

    # pre case configuration
    set_up(level='case')

    # adb initialization
    adb = ADB(test_conf["device_id"])
    device = Device(test_conf["device_id"])

    # launch abs monitoring in the background
    def wb_func_abs_monitor(abs_use_up_timeout=60,
                            abs_recover_timeout=60,
                            monitor_interval=1,
                            log_file=None):
        if log_file is None:
            log_file = os.path.join(test_conf["monitor_home"],
                                    "wb_func_abs_monitor.log")
        # device = Device(test_conf["device_id"])
        with open(log_file, "w") as f:
            f.write("monitor_start=" + str(time.time()) + os.linesep)

            # monitor whether abs can be used up
            time_start = time.time()
            while True:
                # check whether abs is used up
                time_now = time.time()
                if (time_now - time_start) > abs_use_up_timeout:
                    f.write("abs_use_up_ts=timeout" + os.linesep)
                    break
                else:
                    abs_now = device.get_wb_avail_buf()[1][0]
                    f.write(abs_now + os.linesep)
                    if abs_now == "0%":
                        f.write("abs_use_up_ts=" + str(time.time()) +
                                os.linesep)
                        break
                f.flush()
                time.sleep(monitor_interval)

            # monitor whether abs can recover to 100%
            time_start = time.time()
            while True:
                # check whether abs has recovered
                time_now = time.time()
                if (time_now - time_start) > abs_recover_timeout:
                    f.write("abs_recover_ts=timeout" + os.linesep)
                    break
                else:
                    abs_now = device.get_wb_avail_buf()[1][0]
                    f.write(abs_now + os.linesep)
                    if abs_now == "100%":
                        f.write("abs_recover_ts=" + str(time.time()) +
                                os.linesep)
                        break
                f.flush()
                time.sleep(monitor_interval)

    p = Process(target=wb_func_abs_monitor,
                args=[
                    test_benchmark["fio_sw_time"]["max"],
                    test_benchmark["abs_recover_time"]["max"],
                ])
    p.daemon = True
    p.start()

    # run fio command on the cell phone via adb
    cmd = "shell '/data/auto_tools/fio --direct=1 --norandommap=0 --numjobs=1 --ioengine=libaio " \
          + "--iodepth=32 --rw=write --size={}  --bs=512k --runtime=600" \
          + " --name=job1 --filename=/data/auto_tools/fio_test_file'"
    cmd = cmd.format(test_data["fio_file_size"])
    fio_start_ts = time.time()
    tc_logger.info("FIO cmd execution start timestamp: " + str(fio_start_ts))
    adb.execute_adb_command(cmd)
    fio_end_ts = time.time()
    tc_logger.info("FIO cmd execution end timestamp: " + str(fio_end_ts))
    result = assert_values_meet_benchmark([fio_end_ts - fio_start_ts],
                                          test_benchmark["fio_sw_time"], False,
                                          "dc.yaml", ["fio_sw_time"]) | result

    # wait for abs monitoring to complete
    p.join(test_benchmark["fio_sw_time"]["max"] +
           test_benchmark["abs_recover_time"]["max"])
    p.terminate()

    # verify whether abs is used up during fio execution
    monitor_log_file = os.path.join(test_conf["monitor_home"],
                                    "wb_func_abs_monitor.log")
    abs_use_up_ts_pattern = re.compile("abs_use_up_ts=(.+)")
    with open(monitor_log_file, "r") as f:
        for line in f.readlines():
            abs_use_up_ts = abs_use_up_ts_pattern.search(line)
            if abs_use_up_ts is not None:
                break
            else:
                abs_min = line
    abs_min = abs_min.split("%")[0]
    result = assert_values_meet_benchmark(
        [int(abs_min)], test_benchmark["abs_min"], False, "dc.yaml",
        ["abs_min"]) | result

    # verify whether abs can fully recover
    abs_recover_ts_pattern = re.compile("abs_recover_ts=(.+)")
    with open(monitor_log_file, "r") as f:
        for line in f.readlines():
            abs_recover_ts = abs_recover_ts_pattern.search(line)
            if abs_recover_ts is not None:
                abs_recover_ts = abs_recover_ts.group(1)
                break
            else:
                abs_max = line
    abs_max = abs_max.split("%")[0]
    result = assert_values_meet_benchmark(
        [int(abs_max)], test_benchmark["abs_max"], False, "dc.yaml",
        ["abs_max"]) | result

    # verify abs recover time consumption
    if abs_recover_ts == "timeout":
        abs_recover_time = -1
    else:
        abs_recover_time = float(abs_recover_ts) - fio_end_ts
    result = assert_values_meet_benchmark(
        [abs_recover_time], test_benchmark["abs_recover_time"], False,
        "dc.yaml", ["abs_recover_time"]) | result

    # verify flush_status_after_recover_1
    flush_status_after_recover = device.get_wb_flush_status()
    result = assert_values_meet_benchmark(
        [flush_status_after_recover],
        test_benchmark["flush_status_after_recover_1"], False, "dc.yaml",
        ["flush_status_after_recover_1"]) | result

    # verify flush_status_after_recover_2
    flush_status_after_recover = device.get_wb_flush_status()
    result = assert_values_meet_benchmark(
        [flush_status_after_recover],
        test_benchmark["flush_status_after_recover_2"], False, "dc.yaml",
        ["flush_status_after_recover_2"]) | result

    # return test case result
    return result