def test_cmd_with_res_fmwk(stc):
    """Exercise IteratorValidateCommand end-to-end against the results
    framework: one passing iteration and one failing iteration, checking
    the command's return value and the Verdict property after each."""
    creator = CScriptableCreator()
    seq = CStcSystem.Instance().GetObject("Sequencer")

    # Build the command tree: group -> while -> (object iterator, validator)
    grp_cmd = creator.Create(PKG + ".IterationGroupCommand", seq)
    assert grp_cmd
    loop_cmd = creator.Create("SequencerWhileCommand", grp_cmd)
    assert loop_cmd
    obj_iter_cmd = creator.Create(PKG + ".ObjectIteratorCommand", loop_cmd)
    validator_cmd = creator.Create(PKG + ".IteratorValidateCommand", loop_cmd)

    loop_cmd.Set("ExpressionCommand", obj_iter_cmd.GetObjectHandle())
    loop_cmd.SetCollection("CommandList", [validator_cmd.GetObjectHandle()])
    grp_cmd.SetCollection("CommandList", [loop_cmd.GetObjectHandle()])
    seq.SetCollection("CommandList", [grp_cmd.GetObjectHandle()])

    # Methodology manager with an active test case and a live result object
    mgr = meth_mgr_utils.get_meth_manager()
    assert mgr is not None
    meth = creator.Create("StmMethodology", mgr)
    tc = creator.Create("StmTestCase", meth)
    meth_mgr_utils.set_active_test_case(tc)
    assert meth_mgr_utils.get_stm_test_result() is not None

    # Make the command under test resolve to our validator instance
    IteratorValidateCommand.get_this_cmd = MagicMock(return_value=validator_cmd)

    # Feed the results framework a passing iteration
    RI.create_test()
    RI.start_test()
    frame_size = 128
    iter_id = 1
    iter_hnd = obj_iter_cmd.GetObjectHandle()
    RI.set_iterator_current_value(iter_hnd, "FrameSize", frame_size, iter_id)
    RI.add_provider_result(test_utils.dummy_verify_result_passed)
    RI.complete_iteration()

    # Command completes and the verdict reflects the passing result
    assert IteratorValidateCommand.run(iter_id) is True
    assert validator_cmd.Get("Verdict") is True

    # Now a failing iteration
    RI.set_iterator_current_value(iter_hnd, "FrameSize",
                                  frame_size + 128, iter_id + 1)
    RI.add_provider_result(test_utils.dummy_verify_result_failed)
    RI.complete_iteration()

    # Command still completes, but the verdict is now False
    assert IteratorValidateCommand.run(iter_id + 1) is True
    assert validator_cmd.Get("Verdict") is False
def set_active_test_case(test_case):
    """Delegate to mm_utils to mark *test_case* as the active test case."""
    mm_utils.set_active_test_case(test_case)
def run(StmTestCase, EnableTieCheck, EnableLoadConfig):
    """Prepare an StmTestCase for execution.

    Looks up the test case by handle, parses its TXML metadata, runs the
    TXML processing functions, creates and tags the ports described by the
    TXML, optionally runs a pre-run resource validation (Test Intelligence
    estimation), optionally loads the sequencer config on the caller's
    behalf, activates the test case, pushes the key/value JSON into the
    MethodologyGroupCommand, and attaches the ports (unless any port
    location was detected as offline, e.g. in a unit-test config).

    Arguments:
    StmTestCase -- handle of the StmTestCase object to run
    EnableTieCheck -- if True, run the pre-run resource validation check
    EnableLoadConfig -- if True, load the sequencer XML (caller has not)

    Returns True on success; False on any failure.  Failures are logged,
    and resource-validation failures are also written to this command's
    Status property.
    """
    plLogger = PLLogger.GetLogger('methodology')
    plLogger.LogDebug('begin.run.RunStmTestCaseCommand')

    stc_sys = CStcSystem.Instance()
    hnd_reg = CHandleRegistry.Instance()
    test_case = hnd_reg.Find(StmTestCase)
    if test_case is None or not test_case.IsTypeOf('StmTestCase'):
        plLogger.LogError('Was unable to find StmTestCase with handle ' +
                          str(StmTestCase) + ' in the system.')
        return False

    txml_path = os.path.join(test_case.Get('Path'), mgr_const.MM_META_FILE_NAME)
    plLogger.LogDebug('txml_path: ' + str(txml_path))
    plLogger.LogDebug('stm_test_case: ' + test_case.Get('Name'))

    txml_root = get_txml_file_root(txml_path)
    if txml_root is None:
        plLogger.LogError('Could not parse TXML')
        return False

    # Parse the TXML for the keys and values
    key_val_dict, gui_key_val_dict = parse_txml_keys_values(txml_root)

    # Parse the TXML for the processing functions
    proc_func_dict = parse_txml_proc_funcs(txml_root)

    # Run the TXML processing functions
    res = run_txml_proc_funcs(proc_func_dict,
                              key_val_dict,
                              gui_key_val_dict)
    if not res:
        plLogger.LogError('Failed to run TXML processing functions!')
        return False

    # Parse the TXML processing dictionaries
    input_dict_list = get_txml_proc_dicts(txml_root)

    # Parse json dictionaries and run processing functions
    res = run_txml_proc_util(input_dict_list, key_val_dict, gui_key_val_dict)

    if not res:
        plLogger.LogError('Failed to run TXML processing function utilities!')
        return False

    port_group_dict = parse_txml_port_groups(txml_root)
    port_group_list = build_port_group_list(port_group_dict)

    plLogger.LogDebug('key_val_dict (json): ' + json.dumps(key_val_dict))
    plLogger.LogDebug('port_group_list: ' + str(port_group_list))

    # Remove the gui-only keys from key_val_dict; pop with a default so
    # keys present only in the gui dict are silently skipped.
    for key in gui_key_val_dict:
        key_val_dict.pop(key, None)
    plLogger.LogDebug("key_val_dict without gui only keys (json): " +
                      json.dumps(key_val_dict))

    # Create the ports (they will be tagged appropriately)...
    ports, offline_detected = create_and_tag_ports(port_group_list)

    # Pre-run resource validation: only meaningful against real hardware,
    # so it is skipped when any offline port location was detected.
    if EnableTieCheck and not offline_detected:
        estimationUtils = EstimationUtils(test_case)
        output_json = estimationUtils.get_estimates_json()
        plLogger.LogDebug('output_json: ' + output_json)
        tie_pkg = 'spirent.testintel'

        result = None
        verdict = []
        with AutoCommand(tie_pkg + '.ScalingValidatePortsCommand') as tie_cmd:
            tie_cmd.Set('Profile', output_json)
            # Best-effort: a failed Execute still leaves PassFailState
            # readable, so swallow (but log) the exception.
            try:
                tie_cmd.Execute()
            except Exception as e:
                plLogger.LogDebug('ScalingValidatePortsCommand raised: ' +
                                  str(e))
            result = tie_cmd.Get('PassFailState')
            # Verdict may be absent or malformed JSON; fall back to [].
            try:
                verdict = json.loads(tie_cmd.Get('Verdict'))
            except Exception as e:
                plLogger.LogDebug('Could not parse validation Verdict: ' +
                                  str(e))
        plLogger.LogInfo('Validation Verdict: {}'.format(verdict))
        if result is None or result != 'PASSED':
            if result is None:
                # FIX: was undefined name "tiepkg" (NameError on this path)
                plLogger.LogError('ERROR: Unable to create an instance of ' +
                                  tie_pkg + '.ScalingValidatePortsCommand.')
            else:
                # Collect a human-readable failure line for each low- or
                # zero-confidence port location reported by the validator.
                fail_list = []
                for ent in verdict:
                    if 'portLocations' not in ent:
                        continue
                    for loc in ent['portLocations']:
                        if not loc['confidence']:
                            out_fmt = 'Port {} can not run test: {}'
                            out = out_fmt.format(loc['location'],
                                                 loc['reason'])
                            fail_list.append(out)
                        elif loc['confidence'] < 100.0:
                            out_fmt = 'Port {} may run with {}% confidence, ' + \
                                'disable pre-run resource validation check ' + \
                                'to proceed: {}'
                            out = out_fmt.format(loc['location'],
                                                 loc['confidence'], loc['reason'])
                            fail_list.append(out)
                # __commandHandle__ is injected by the sequencer framework
                this_cmd = hnd_reg.Find(__commandHandle__)
                this_cmd.Set('Status', '\n'.join(fail_list))
            # Common exit point after setting failure
            return False

    # If true, the caller hasn't already loaded the config, so do it on their behalf
    if EnableLoadConfig:
        with AutoCommand('LoadFromXml') as load_cmd:
            load_cmd.Set('FileName',
                         os.path.join(test_case.Get('Path'),
                                      mgr_const.MM_SEQUENCER_FILE_NAME))
            load_cmd.Execute()

    # Set the active test case
    mm_utils.set_active_test_case(test_case)

    # Configure the first MethodologyGroupCommand in the sequencer with
    # the (gui-stripped) key/value JSON.
    sequencer = stc_sys.GetObject('Sequencer')
    cmd_list = sequencer.GetCollection('CommandList')
    for cmd_hnd in cmd_list:
        cmd = hnd_reg.Find(cmd_hnd)
        if cmd is None:
            continue

        if cmd.IsTypeOf("spirent.methodology.manager.MethodologyGroupCommand"):
            cmd.Set("KeyValueJson", json.dumps(key_val_dict))
            break

    # If any of the ports were offline ports, then assume a test config
    # that isn't to connect to chassis (e.g., a unit test).
    if not offline_detected:
        attach_ports(ports)

    plLogger.LogDebug('end.run.RunStmTestCaseCommand')
    return True