Example no. 1
def login_ilo_ipv4(host, cred, headers=None, legacy=False):
    """ Logs into the iLO, creating an iLO session

        'host' IP address of the iLO
        'cred' the Username and Password for the iLO session in a dictionary format
        """

    logger._info('Login to Host: %s with credentials: %s' % (host, cred))
    ilo_client = PERISClient()
    ilo_client._host = host
    ilo_client._cred = cred or {
        "UserName": "******",
        "Password": "******"
    }
    # Drop any stale X-Auth-Token so the login request starts a fresh session
    if 'X-Auth-Token' in ilo_client._headers:
        ilo_client._headers.pop('X-Auth-Token')
    if legacy:
        uri = BuiltIn().get_variable_value("${iLO_REST_SESSION_URI}")
    else:
        uri = BuiltIn().get_variable_value("${iLO_REDFISH_SESSION_URI}")
        ilo_client.update_headers(
            'OData-Version',
            BuiltIn().get_variable_value("${iLO_REDFISH_VERSION}"))
    logger._debug('POST Body: %s' % json.dumps(cred))
    resp = ilo_client.post(uri, headers=headers, data=json.dumps(cred))
    sessionID = None
    if 'x-auth-token' in resp.headers:
        sessionID = resp.headers['x-auth-token']
        logger._debug('X-Auth-Token: %s' % sessionID)
        # leaving below here in case we need to test more than 1 active sessions
        #             ilo_client._active_sessions[cred['UserName']] = {'Password': cred['Password'], 'sessionID': sessionID}
        ilo_client._sessionID = sessionID
        ilo_client.update_headers('X-Auth-Token', sessionID)
        if 'location' in resp.headers:
            ilo_client._session_uri = resp.headers['location']
            # Strip the scheme and host from the Location header, keeping the /redfish/... path
            ilo_client._session_uri = '/re' + ilo_client._session_uri.split(
                're')[1]
    return ilo_client, resp, sessionID
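A minimal usage sketch for the keyword above; the IP address and credentials are placeholders, and it assumes the Robot Framework variables referenced inside the keyword (session URIs, Redfish version) are already configured in the test environment:

creds = {"UserName": "admin", "Password": "admin-password"}  # placeholder credentials
ilo_client, resp, session_id = login_ilo_ipv4("192.0.2.10", creds)
assert resp.status_code in (200, 201), "iLO login failed with status %s" % resp.status_code
assert session_id is not None, "iLO did not return an X-Auth-Token"
# ilo_client now carries the X-Auth-Token header for subsequent requests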
def put_ris_resource(ilo, username, password, path, payload):
    """
    put the ris resource
    :param ilo:
    :param username:
    :param password:
    :param path: /redfish/v1/Systems/1/smartstorageconfig/settings
    :param payload: JSON
    :return: True or False
    """
    output = ''
    try:
        command = "curl -ksL --user %s:%s -H 'content-type:application/json' -X PUT --data '%s' https://%s%s" % (username, password, payload, ilo, path)
        logger._debug("The curl command is %s " % command)
        output = subprocess.check_output(command, shell=True)
        logger._debug("PUT RIS resource %s for iLO %s output is %s" % (path, ilo, output))
        if output != '' and 'Success' in output:
            return True
        else:
            return False
    except subprocess.CalledProcessError as e:
        raise AssertionError("Delete RIS resource %s for iLO %s had exceptions" % (path, ilo))
Example no. 3
 def get_volume_connected_sessions(self,
                                   mgmt_ip,
                                   username,
                                   password,
                                   volume_name,
                                   port=22):
     ssh_client = paramiko.SSHClient()
     ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
     try:
         ssh_client.connect(mgmt_ip,
                            username=username,
                            password=password,
                            port=int(port))
         command = 'getVolumeInfo volumeName=%s' % volume_name
         stdin, stdout, stderr = ssh_client.exec_command(command)
         output = stdout.readlines()
         logger._debug("getVolumeInfo volumeName=%s output is %s" %
                       (volume_name, output))
         ssh_client.close()
         output_string = ''.join(output)
         connected_sessions = ''
         if 'SESSION' in output_string:
             logger._debug(
                 "SESSION found in getVolumeInfo volumeName=%s output" %
                 volume_name)
             start = output_string.index('SESSION')
             end = output_string.index('PERMISSION')
             sessions_string = output_string[start:end - 4]
             sessions = sessions_string.split('SESSION')
             for session in sessions:
                 print session
                 if 'connected' in session:
                     connected_sessions = connected_sessions + session
             if (connected_sessions == ''):
                 return sessions_string, 'FAIL'
             else:
                 logger._debug("connected sessions for volume %s are %s" %
                               (volume_name, connected_sessions))
                 return connected_sessions, 'PASS'
         else:
             logger._debug(
                 "SESSION not found in getVolumeInfo volumeName=%s output" %
                 volume_name)
             return output_string, 'FAIL'
     except (paramiko.BadHostKeyException, paramiko.AuthenticationException,
             paramiko.SSHException) as e:
         raise AssertionError("SSH exception %s" % e.message)
Example no. 4
    def _transitions_to_needed_state(self, states, ttl=None):
        '''
        Return the list of transitions needed to reach a state with untaken transitions
        '''
        logging._debug("Calling _transitions_to_needed_state")

        # None needed if there are untaken transitions from current state(s)
        if self._untaken_transitions_in_state(states):
            if self.debug:
                logging._log_to_console(
                    "There are untaken transitions in this state.\n")
            return []

        # Longest possible number of needed transitions
        if ttl is None:
            ttl = self._number_of_flat_states()

        # Increase number of transitions to take to find path to needed
        # state(s)
        for depth in range(1, ttl + 1):
            for trans in self._available_transitions(states):
                if 'target' not in trans:
                    continue

                # Check each transition from this state
                nextStates = self._get_next_state(states, trans)
                transitions = self._transitions_to_needed_state(
                    nextStates, depth - 1)
                # FIXME: This can be done better
                if transitions is not None:
                    ret = [trans]
                    ret.extend(transitions)
                    return ret

        # Unable to reach a needed state within ttl transitions
        return None
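The recursion above is essentially an iterative-deepening search for the nearest state that still has untaken transitions. As a standalone illustration of the same idea over a plain adjacency dict (independent of the SCXML model class, with invented names):

def path_to_needed_state(graph, needed, start, ttl):
    """graph maps state -> list of (event, next_state); needed is a set of goal states."""
    if start in needed:
        return []
    for depth in range(1, ttl + 1):
        for event, nxt in graph.get(start, []):
            sub = path_to_needed_state(graph, needed, nxt, depth - 1)
            if sub is not None:
                return [event] + sub
    return None  # no goal state reachable within ttl transitions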
def run_cpqlocfg(command):
    '''
    :param command: cpqlocfg.exe command line string with any parameters
    :return:  tuple of cpqlocfg.exe output or error message and PASS or FAIL

    cpqlocfg.exe is a 32-bit Windows app.  It can be run natively on Windows or via the 32-bit wine emulator on Linux.
    Note that not all Linux distributions include 32-bit wine; it may have to be built from source.
    '''
    os_name = platform.system()
    if os_name == 'Windows' or os_name == 'Linux':
        if os_name == 'Linux':
            wine_path = find_executable('wine')
            if wine_path is None:
                return 'Cannot find wine to run cpqlocfg.exe on Linux', 'FAIL'
            command = wine_path + ' ' + command.replace('\\', '/')
            command = command.split(' ')
        try:
            logger._debug("The command is %s " % command)
            output = subprocess.check_output(command)
            logger._debug("The output is %s" % output)
            if 'Script succeeded' in output:
                start = output.index('<?xml')
                end = output.rindex('>')
                output = output[start:end + 1]
                output = re.sub('\n|\r', '', output)
                # Strip the XML declaration and the empty RIBCL response wrappers
                for noise in ('<?xml version=\"1.0\"?>',
                              "<RIBCL VERSION=\"2.23\"><RESPONSE    STATUS=\"0x0000\"    MESSAGE='No error'     /></RIBCL>",
                              "<RESPONSE    STATUS=\"0x0000\"    MESSAGE='No error'     />"):
                    output = output.replace(noise, '')
                return output, 'PASS'
            else:
                return output, 'FAIL'
        except subprocess.CalledProcessError as e:
            return e.output, 'FAIL'
    return 'cpqlocfg.exe is only supported on Windows and Linux (got %s)' % os_name, 'FAIL'
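A usage sketch for run_cpqlocfg; the script path, iLO address, and credentials are placeholders, and the command-line switches follow the conventional cpqlocfg usage, so check them against the installed version:

output, status = run_cpqlocfg(
    'cpqlocfg.exe -s 192.0.2.10 -u admin -p admin-password -f C:\\scripts\\get_snmp.xml')
if status != 'PASS':
    raise AssertionError("cpqlocfg failed: %s" % output)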
def check_ilo_snmp_address(snmp_settings, address, rocommunity='', check_rocommunity=False):
    '''
    :param snmp_settings: iLO SNMP settings XML string (contains SNMP_ADDRESS_* tags)
    :param address: IP address to check for, e.g. '16.114.220.248'
    :param rocommunity: expected read-only community: '', 'abcabc', 'REGEXP:\w{6}', ...
    :param check_rocommunity: when True, also verify the read-only community
    '''
    tree = ET.fromstring(snmp_settings)
    laddress = []
    lcommunity = []
    for e in tree.iter():
        if re.search('SNMP_ADDRESS', e.tag):
            fields = e.tag.split('_')
            if len(fields) == 3:
                ip = e.attrib['VALUE']
                # change IPv6 address format
                if re.search(r':', ip):
                    ip = re.sub(r'(:0){2,}', ':', ip)
                laddress.append(ip)
            if len(fields) == 4 and fields[3] == 'ROCOMMUNITY':
                lcommunity.append(e.attrib['VALUE'])
    t = zip(laddress, lcommunity)

    check = False
    for item in t:
        logger._debug("address %s rocommunity %s" % (item[0], item[1]))
        if check_rocommunity:
            if re.match('REGEXP', rocommunity):
                (prefix, regexp) = rocommunity.split(':', 1)
                if item[0] == address and re.match(regexp, item[1]):
                    logger._debug("address %s rocommunity %s" % (item[0], item[1]))
                    check = True
                    break
            else:
                if item[0] == address and item[1] == rocommunity:
                    logger._debug("address %s rocommunity %s" % (item[0], item[1]))
                    check = True
                    break
        else:
            if item[0] == address:
                check = True
                break

    BuiltIn().should_be_equal(str(check), str(True), msg='iLO SNMP address check should be True')
    if check_rocommunity:
        BuiltIn().log('Pass: Check ilo snmp address %s rocommunity %s' % (address, rocommunity), console=True)
    else:
        BuiltIn().log('Pass: Check ilo snmp address %s' % address, console=True)
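A hedged example for check_ilo_snmp_address; the XML below only imitates the SNMP_ADDRESS_* / *_ROCOMMUNITY tag shape this parser looks for and is not taken from a real iLO response, and the keyword must run inside a Robot Framework context because it asserts via BuiltIn():

snmp_settings = (
    '<GET_SNMP_IM_SETTINGS>'
    '<SNMP_ADDRESS_1 VALUE="192.0.2.30"/>'
    '<SNMP_ADDRESS_1_ROCOMMUNITY VALUE="public"/>'
    '</GET_SNMP_IM_SETTINGS>'
)
check_ilo_snmp_address(snmp_settings, '192.0.2.30', rocommunity='public',
                       check_rocommunity=True)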
Example no. 7
    def _get_verifiable_states(self, xml=None, hierarchy=None):
        '''
        Get verifiable states, validating the structure along the way. A
        verifiable state is one with transition(s) and/or no sub-states.
        '''
        logging._debug("Calling _get_verifiable_states")
        if xml is None:
            xml = copy.deepcopy(self.xml)

        states = []

        if hierarchy is None:
            hierarchy = []

        # Store hierarchy if at state of parallel tag
        if 'id' in xml:
            xml['-hierarchy'] = hierarchy
            hierarchy.append(xml['id'])

        # Return if no more substates
        if 'state' not in xml and 'parallel' not in xml:
            if hierarchy[-1] == 'parallel':
                raise AttributeError(
                    "found parallel tag %s with no substates" % xml['id'])
            else:
                return xml

        # Might as well verify any initial tags
        if 'initial' in xml:
            initial = copy.deepcopy(xml['initial'])
            if not isinstance(initial, str):
                if 'transition' not in initial:
                    raise AttributeError(
                        "initial tag of %s has no transition" % xml['id'])

                if len(initial['transition']) != 1:
                    raise AttributeError(
                        "initial tag for %s must have only one \
                                        transition" % xml['id'])

                trans = copy.deepcopy(initial['transition'][0])
                if 'target' not in trans:
                    raise AttributeError(
                        "transition in initial tag for %s must "
                        "have a target" % xml['id'])

                if 'event' in trans:
                    raise AttributeError("transition in initial tag for %s \
                                        shouldn't have event" % xml['id'])

                initial = copy.deepcopy(trans['target'])
            if not ('state' in xml and
                    any(key == initial for key in xml['state'].keys())) and not (
                        'parallel' in xml and
                        any(key == initial for key in xml['parallel'].keys())):
                raise AttributeError(
                    "failed to find state for initial target %s" % initial)

        # Check in all parallel tags
        if 'parallel' in xml:
            for key in xml['parallel'].keys():
                parallel_hierarchy = copy.deepcopy(hierarchy)
                parallel_hierarchy.append("parallel")
                rc = self._get_verifiable_states(
                    copy.deepcopy(xml['parallel'][key]), parallel_hierarchy)
                states.extend(rc) if isinstance(rc,
                                                list) else states.append(rc)

        # Check in all state tags
        if 'state' in xml:
            for key in xml['state'].keys():
                state_hierarchy = copy.deepcopy(hierarchy)
                state_hierarchy.append("state")
                rc = self._get_verifiable_states(
                    copy.deepcopy(xml['state'][key]), state_hierarchy)
                states.extend(rc) if isinstance(rc,
                                                list) else states.append(rc)

        if 'transition' in xml:
            states.append(xml)

        logging._debug("_get_verifiable_states Returning States: %s" %
                       pprint.pformat(states))

        return states
Example no. 8
    def _get_next_state(self, states, trans):
        '''
        Get next state given transition from current state
        '''
        old_states = copy.deepcopy(states)
        new_states = []
        implied = []

        logging._debug("Calling _get_next_state")
        if self.debug:
            logging._debug("with states: %s" % pprint.pformat(states))
            logging._debug("and with transition: %s" % pprint.pformat(trans))

        # no change if target not defined
        if 'target' not in trans:
            return old_states

        for target in trans['target'].split(' '):
            # check effect of each new target
            target_xml = self._find_state(target)
            if target_xml is None:
                raise RuntimeError("failed to find state %s" % target)

            # if target is at top of SCXML, everything will change
            if (len(target_xml['-hierarchy']) == 1):
                old_states = []

            # check each existing state to see if it is replaced
            for i in range(len(old_states) - 1, -1, -1):
                state = old_states[i]

                logging._debug("Considering State: %s" % pprint.pformat(state))

                # if state is at top of SCXML, it will be replaced
                if (len(state['-hierarchy']) == 1):
                    old_states.pop(i)
                    continue

                # if state is in target hierarchy, it will not change
                if (state['id'] in target_xml['-hierarchy']):
                    continue

                # FIXME: is this the best way to accomplish this?
                parent_type = state['-hierarchy'][-3]
                parent = state['-hierarchy'][-2]
                changing_parent = parent if parent_type == 'state' else state[
                    'id']
                if (changing_parent in target_xml['-hierarchy']):
                    old_states.pop(i)
                    continue

                parent_type = target_xml['-hierarchy'][-3]
                parent = target_xml['-hierarchy'][-2]
                changing_parent = parent if parent_type == 'state' else target_xml[
                    'id']
                if (changing_parent in state['-hierarchy']):
                    old_states.pop(i)
                    continue

            # if target is part of parallel state,
            # include other implied new states
            for i in range(0, len(target_xml['-hierarchy']) - 1, 2):
                # check all parallel states in hierarchy
                if target_xml['-hierarchy'][i] != 'parallel':
                    continue
                parallel = target_xml['-hierarchy'][i + 1]

                # skip if parallel ancestor is part of
                # hierarchy of current states
                # FIXME: Grep: This can be done better.
                for h in old_states:
                    if parallel in h['-hierarchy']:
                        continue
                parallel = self._find_state(parallel)

                # keep track of implied parallel states
                # (ignore dups)
                # FIXME: Grep: This can be done better
                for state_xml in self._get_full_state_from_here(parallel):
                    dup = 0
                    for imply in implied:
                        if imply['id'] == state_xml['id']:
                            dup += 1
                            break

                    if dup == 0:
                        implied.append(state_xml)

            full_state = self._get_full_state_from_here(target_xml)
            if isinstance(full_state, list):
                new_states.extend(full_state)
            else:
                new_states.append(full_state)

        logging._debug("new_states without old_states: %s" %
                       pprint.pformat(new_states))

        # combine new and the unchanged old states
        new_states.extend(old_states)

        logging._debug("new_states with old_states: %s" %
                       pprint.pformat(new_states))

        # remove non-transitionable ancestors of new_states from implied new
        # states
        for i in range(len(implied) - 1, -1, -1):
            id = implied[i]['id']
            for new_state in new_states:
                # of course, remove implied state if it's the same as new state
                if id == new_state['id']:
                    implied.pop(i)
                    break

                lca = self._get_lca(implied[i], new_state)
                # remove state from implied if its LCA with the
                # new state is not 'parallel'
                if (lca is not None) and (lca['-hierarchy'][-1] == 'state'):
                    implied.pop(i)
                    break

                # remove state from implied if it is an ancestor of the new state
                if (id in new_state['-hierarchy']) or new_state['id'] == id:
                    implied.pop(i)
                    break

        # combine new resulting states with implied states
        new_states.extend(implied)

        if self.debug:
            logging._debug("new_states with implied: %s" %
                           pprint.pformat(new_states))

        # add transitionable ancestors of new_states
        for state in new_states:
            for i in range(1, len(state['-hierarchy']), 2):
                ancestor = state['-hierarchy'][i]
                ancestor = self._find_state(ancestor)
                if 'transition' not in ancestor:
                    continue
                dup = 0
                # FIXME: Grep: This can be done better
                for new_state in new_states:
                    if new_state['id'] == ancestor['id']:
                        dup += 1
                        break

                if dup == 0:
                    new_states.append(ancestor)

        if self.debug:
            logging._debug("new_states with ancestors: %s" %
                           pprint.pformat(new_states))

        return sorted(new_states, key=itemgetter('id'))
Example no. 9
 def _number_of_flat_states(self):
     '''
     Number of states in model
     '''
     logging._debug("%s flat states found" % len(self.flat_model.keys()))
     return len(self.flat_model.keys())
Example no. 10
    def run_all_transitions(
        self,
        type="online",
        file=None,
        minutes=None,
    ):
        '''
        Runs all transitions
        usage: run_all_transitions(type, file, minutes)
         type -- online or offline (defaults to online)
         file -- file to log transitions
         minutes -- maximum number of minutes to run
        '''
        path = None
        fh = None
        start = time.time()

        # Validate args
        if (type is None) or (string.lower(type) != "online"
                              and string.lower(type) != "offline"):
            raise AttributeError(
                "run_all_transitions requires 'type' arg set to \
                            offline or online")

        if (string.lower(type) == "offline" and (file is None)):
            raise AttributeError("run_all_transitions requires 'file' arg for \
                            offline run")

        if (string.lower(type) == "offline" and minutes is not None):
            raise AttributeError(
                "can't use 'minutes' constraint in offline test \
                            generation")

        if self.root is None:
            raise AttributeError("can't run_all_transitions without SCXML passed \
                            to the constructor")

        # State with initial state
        curr_states = self._get_full_state(self.root.attrib['initial'])
        self._flatten_state(curr_states)

        if file is None:
            path = []
            for state in curr_states:
                path.append("state: %s" % state['id'])
        else:
            path = file
            fh = open(file, "w")
            for state in curr_states:
                fh.write("state: %s\n" % state['id'])

        # for trans in self._untaken_transitions():
        while self._untaken_transitions():
            if self.debug:
                logging._log_to_console(
                    "=======================================================")
                logging._log_to_console("Now in state: %s\n" %
                                        pprint.pformat(curr_states[0]['id']))

            # Check for exit conditions
            if minutes is not None:
                if (time.time() - start) > (int(minutes) * 60):
                    logging._info(
                        "Time Constraint of %s minutes has been exceeded. Execution complete."
                        % minutes)
                    break

            # Get to closest state with untaken transitions
            transitions = self._transitions_to_needed_state(curr_states)
            if transitions is None:
                state_ids = [state['id'] for state in curr_states]
                raise RuntimeError("Unable to get to needed states from %s" %
                                   ' '.join(state_ids))

            # Log transitions to get to that state
            for trans in transitions:
                curr_states = self._get_next_state(curr_states, trans)
                if file is not None:
                    fh.write("event: %s\n" % trans['event'])
                    # FIXME: Better way to do this
                    for state in curr_states:
                        fh.write("state: %s\n" % state['id'])
                else:
                    path.append("event: %s" % trans['event'])
                    # FIXME: Better way to do this
                    for state in curr_states:
                        path.append("state: %s" % state['id'])

            logging._debug("curr_states: %s" % pprint.pformat(curr_states))

            # Choose an untaken transition
            if self.debug:
                logging._log_to_console("Getting untaken transitions...")
            transitions = self._untaken_transitions_in_state(curr_states)
            if self.debug:
                logging._log_to_console("Selecting from transitions: %s" %
                                        pprint.pformat(transitions))
            trans = transitions[random.randint(0, len(transitions) - 1)]
            trans['-taken'] = 1
            curr_states = self._get_next_state(curr_states, trans)
            self._flatten_state(curr_states)

            if self.debug:
                logging._log_to_console("\nTook transition: %s" %
                                        pprint.pformat(trans))

            # Log it
            if file is not None:
                logging._debug("Writing event '%s' to file." % trans['event'])
                fh.write("event: %s\n" % trans['event'])
                for state in curr_states:
                    logging._debug("Writing state '%s' to file." % state['id'])
                    fh.write("state: %s\n" % state['id'])
            else:
                path.append("event: %s" % trans['event'])
                for state in curr_states:
                    path.append("state: %s" % state['id'])

        if file is not None:
            fh.close()

        # Run this path if online, otherwise done
        if string.lower(type) == "online":
            return self.run_defined_path(path)

        return True
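In offline mode the generated path file is just alternating 'state:' and 'event:' lines, so it can be inspected or replayed later. A small sketch of reading such a file back (the filename and ids are invented):

with open("transitions.log") as fh:
    steps = [line.strip() for line in fh if line.strip()]
# steps looks like ["state: Idle", "event: power_on", "state: Running", ...]
events = [s.split(": ", 1)[1] for s in steps if s.startswith("event:")]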
def lookup_server_profile_template_volume_attachment_ATAI(spt):
    """
    :param spt: SPT dict; volume attachment ATAI is looked up from the SPT DTO wherever "SPTVAID:<id>" is defined, e.g.:
            spt_edit = {
                "type":"ServerProfileTemplateV400","name":SPT1_NAME,
                "serverHardwareTypeUri":'SHT:'+SHT1,"enclosureGroupUri":'EG:'+EG_NAME,
                "iscsiInitiatorNameType":"UserDefined",
                "serialNumberType":"Virtual","macType":"Virtual","wwnType":"Virtual","affinity":"Bay",
                "connections":[
                     {"id":1,"name":"","functionType":"FibreChannel","portId":"Mezz 2:1","requestedMbps":"Auto","networkUri":'FC:fa-a',},
                     {"id":2,"name":"","functionType":"FibreChannel","portId":"Mezz 2:2","requestedMbps":"Auto","networkUri":'FC:fa-b',},
                     {"id":3,"name":"","functionType":"iSCSI","portId":"Flb 1:1-b","requestedMbps":"2500","networkUri":"ETH:network-untagged","ipv4":{}},
                     {"id":4,"name":"","functionType":"iSCSI","portId":"Flb 1:2-b","requestedMbps":"2500","networkUri":"ETH:network-untagged","ipv4":{}},
                     {"id":5,"name":"","functionType":"Ethernet","portId":"Mezz 1:1-a","requestedMbps":"2500","networkUri":"ETH:network-tunnel","ipv4":{}},
                     {"id":6,"name":"","functionType":"Ethernet","portId":"Mezz 1:2-a","requestedMbps":"2500","networkUri":"ETH:network-tunnel","ipv4":{}}
                 ],
                "boot":{"manageBoot":True,"order":["HardDisk","CD","Floppy","USB","PXE"]},"bootMode":None,
                "firmware":{"manageFirmware":False,"firmwareBaselineUri":"","forceInstallFirmware":False,"firmwareInstallType":None},
                "bios":{"manageBios":False,"overriddenSettings":[]},
                "localStorage":{"sasLogicalJBODs":[],"controllers":[]},
                "sanStorage":{"hostOSType":"VMware (ESXi)","manageSanStorage":True,
                    "volumeAttachments":[
                        {"id":1,"volumeUri":"SVOL:"+VOLUME_SHARED_3PAR1,"isBootVolume":False,"lunType":"Manual","lun":None,
                        "associatedTemplateAttachmentId":'SPTVAID:1',
                         "storagePaths":[{"isEnabled":True,"connectionId":1,"targetSelector":"Auto","targets":[]},
                                         {"isEnabled":True,"connectionId":2,"targetSelector":"Auto","targets":[]}]
                         },
                        {"id":2,"volumeUri":"SVOL:"+VOLUME_SHARED_VSA2,"isBootVolume":False,"lunType":"Auto","lun":None,
                        "associatedTemplateAttachmentId":'SPTVAID:2',
                         "storagePaths":[{"isEnabled":False,"connectionId":5,"targetSelector":"Auto","targets":[]},
                                         {"isEnabled":False,"connectionId":6,"targetSelector":"Auto","targets":[]}]
                         },
                    ]
                }
            }
    :return: spt: updated spt with volume attachment ATAI.
    """
    if isinstance(spt, dict) and re.search('ServerProfileTemplate', spt['type']):
        lookup_ATAI = False
        if 'sanStorage' in spt and 'volumeAttachments' in spt['sanStorage'] and len(spt['sanStorage']['volumeAttachments']) != 0:
            for va in spt['sanStorage']['volumeAttachments']:
                if 'associatedTemplateAttachmentId' in va and va['associatedTemplateAttachmentId'] is not None and re.search("^SPTVAID", va['associatedTemplateAttachmentId']):
                    lookup_ATAI = True
        if lookup_ATAI:
            logger._debug('Lookup volume attachment ATAI for SPT %s' % spt['name'])
            param = "?filter='name'='%s'" % spt['name']
            response = fusion_lib.fusion_api_get_server_profile_templates(param=param)
            if response['status_code'] == 200 and response['count'] == 1:
                spt_dto = response['members'][0]
                logger._debug("The SPT DTO is %s" % spt_dto)
                if 'sanStorage' in spt_dto and 'volumeAttachments' in spt_dto['sanStorage'] and len(spt_dto['sanStorage']['volumeAttachments']) != 0:
                    vas = spt['sanStorage']['volumeAttachments']
                    vas_spt = spt_dto['sanStorage']['volumeAttachments']  # volume attachments in the SPT DTO
                    for va in vas:
                        if 'associatedTemplateAttachmentId' in va and va['associatedTemplateAttachmentId'] is not None and re.search("^SPTVAID", va['associatedTemplateAttachmentId']):
                            rtn = re.search("^(SPTVAID):(\d*)$", va['associatedTemplateAttachmentId'])
                            spt_va_id = int(rtn.group(2))
                            logger._debug("SPTVAID is %s" % spt_va_id)
                            found = False
                            for va_spt in vas_spt:
                                if va_spt['id'] == spt_va_id:
                                    found = True
                                    logger._debug("ATAI for template volume id %s is %s" % (spt_va_id, va_spt['associatedTemplateAttachmentId']))
                                    va['associatedTemplateAttachmentId'] = va_spt['associatedTemplateAttachmentId']
                            if not found:
                                va['associatedTemplateAttachmentId'] = None
                    logger._debug("After look up ATAI, SPT is now %s" % spt)
                    return spt
                else:
                    raise AssertionError("No sanStorage or volume attachments defined in SPT DTO")
            else:
                raise AssertionError("SPT DTO not found for %s" % spt['name'])
        else:
            logger._debug('No need to lookup volume attachment ATAI for %s' % spt['name'])
            return spt
    else:
        raise AssertionError("The argument spt %s is not SPT DTO" % spt)
def lookup_server_profile_volume_attachment_ATAI(profile):
    """
    :param profile: profile dict; volume attachment ATAI is looked up from the SPT DTO wherever "SPTVAID:<id>" is defined, e.g.:
        profile_edit = {
            "type":"ServerProfileV400","name":SPT1_PROFILE1_NAME,"description":None,
            "serverHardwareUri":'SH:'+ENC1SHBAY1,"enclosureGroupUri":'EG:'+EG_NAME,"enclosureUri":'ENC:'+ENC1,
            "serverProfileTemplateUri":"SPT:"+SPT1_NAME,
            "iscsiInitiatorNameType":"UserDefined","iscsiInitiatorName":SPT1_PROFILE1_IQN,
            "serialNumberType":"Virtual","macType":"Virtual","wwnType":"Virtual","affinity":"Bay",
            "hideUnusedFlexNics":True,"osDeploymentSettings":None,
            "connections":[
                {"id":1,"name":"","functionType":"FibreChannel","portId":"Mezz 2:1","requestedMbps":"Auto","networkUri":'FC:fa-a',},
                {"id":2,"name":"","functionType":"FibreChannel","portId":"Mezz 2:2","requestedMbps":"Auto","networkUri":'FC:fa-b',},
                {"id":3,"name":"","functionType":"iSCSI","portId":"Flb 1:1-b","requestedMbps":"2500","networkUri":"ETH:network-untagged",},
                {"id":4,"name":"","functionType":"iSCSI","portId":"Flb 1:2-b","requestedMbps":"2500","networkUri":"ETH:network-untagged",},
                {"id":5,"name":"","functionType":"Ethernet","portId":"Mezz 1:1-a","requestedMbps":"2500","networkUri":"ETH:network-tunnel"},
                {"id":6,"name":"","functionType":"Ethernet","portId":"Mezz 1:2-a","requestedMbps":"2500","networkUri":"ETH:network-tunnel"}
             ],
            "boot":{"manageBoot":True,"order":["HardDisk","CD","Floppy","USB","PXE"]},"bootMode":None,
            "firmware":{"manageFirmware":False,"firmwareBaselineUri":None,"forceInstallFirmware":False,"firmwareInstallType":None},
            "bios":{"manageBios":False,"overriddenSettings":[]},
            "localStorage":{"sasLogicalJBODs":[],"controllers":[]},
            "sanStorage":{"hostOSType":"RHE Linux (5.x, 6.x)","manageSanStorage":True,
                "volumeAttachments":[
                    {"id":1,"volumeUri":"SVOL:c7000-shared-3par1","isBootVolume":False,"lunType":"Manual","lun":0,
                     "associatedTemplateAttachmentId":'SPTVAID:1',
                     "storagePaths":[
                         {"isEnabled":True,"connectionId":1,"targetSelector":"Auto",},
                         {"isEnabled":False,"connectionId":2,"targetSelector":"Auto",},]
                     },
                ]
            }
        }
    :return: profile: updated profile with volume attachment ATAI.
    """
    if isinstance(profile, dict) and re.search('ServerProfile', profile['type']):
        lookup_ATAI = False
        if 'sanStorage' in profile and 'volumeAttachments' in profile['sanStorage'] and len(profile['sanStorage']['volumeAttachments']) != 0:
            for va in profile['sanStorage']['volumeAttachments']:
                if 'associatedTemplateAttachmentId' in va and va['associatedTemplateAttachmentId'] is not None and re.search("^SPTVAID", va['associatedTemplateAttachmentId']):
                    lookup_ATAI = True
        if lookup_ATAI:
            logger._debug('Lookup volume attachment ATAI for profile %s' % profile['name'])
            param = "?filter='name'='%s'" % profile['name']
            response = fusion_lib.fusion_api_get_server_profiles(param=param)
            if response['status_code'] == 200 and response['count'] == 1:
                profile_dto = response['members'][0]
                logger._debug("The profile DTO is %s" % profile_dto)
                if 'serverProfileTemplateUri' in profile_dto and profile_dto['serverProfileTemplateUri'] is not None:
                    spt_uri = profile_dto['serverProfileTemplateUri']
                    spt_dto = fusion_lib.fusion_api_get_resource(uri=spt_uri)
                    logger._debug("The SPT DTO is %s" % spt_dto)
                    if 'sanStorage' in spt_dto and 'volumeAttachments' in spt_dto['sanStorage'] and len(spt_dto['sanStorage']['volumeAttachments']) != 0:
                        vas = profile['sanStorage']['volumeAttachments']
                        vas_spt = spt_dto['sanStorage']['volumeAttachments']  # volume attachments in the SPT DTO
                        for va in vas:
                            if 'associatedTemplateAttachmentId' in va and va['associatedTemplateAttachmentId'] is not None and re.search("^SPTVAID", va['associatedTemplateAttachmentId']):
                                rtn = re.search("^(SPTVAID):(\d*)$", va['associatedTemplateAttachmentId'])
                                spt_va_id = int(rtn.group(2))
                                for va_spt in vas_spt:
                                    logger._debug("id for va_spt is %s" % va_spt['id'])
                                    if va_spt['id'] == spt_va_id:
                                        logger._debug("ATAI for template volume id %s is %s" % (spt_va_id, va_spt['associatedTemplateAttachmentId']))
                                        va['associatedTemplateAttachmentId'] = va_spt['associatedTemplateAttachmentId']
                        logger._debug("After look up ATAI in SPT, profile is now %s" % profile)
                        return profile
                    else:
                        raise AssertionError("No sanStorage or volume attachments defined in SPT DTO")
                else:
                    raise AssertionError("serverProfileTemplateUri is not defined in profile DTO for profile %s" % profile['name'])
            else:
                raise AssertionError("Profile DTO not found for profile %s" % profile['name'])
        else:
            logger._debug('No need to lookup volume attachment ATAI for profile %s' % profile['name'])
            return profile
    else:
        raise AssertionError("The argument profile %s is not profile DTO" % profile)
Example no. 13
def server_profile_mpsettings_local_accounts_should_match_ris(profile, ilo, username, password):
    """
    :param profile = {
        "type": SERVER_PROFILE_TYPE,
        "name": "TEST",
        "description": None,
        "serverHardwareUri": 'SH:'+ENC1SHBAY1,
        "enclosureGroupUri": 'EG:'+EG_NAME,
        "enclosureUri": 'ENC:'+ ENC1,
        "serverProfileTemplateUri": "SPT:",
        "iscsiInitiatorNameType": "AutoGenerated",
        "serialNumberType": "Virtual",
        "macType": "Virtual",
        "wwnType": "Virtual",
        "affinity": "Bay",
        "hideUnusedFlexNics": True,
        "osDeploymentSettings": None,
        "connectionSettings": {"connections":[]},
        "boot":{"manageBoot": False, "order": []}, "bootMode":None,
        "firmware":{"manageFirmware": False, "firmwareBaselineUri": None, "forceInstallFirmware": False,"firmwareInstallType": None},
        "bios": {"manageBios": False, "overriddenSettings": []},
        "localStorage": {"sasLogicalJBODs": [], "controllers": []},
        "sanStorage": {"hostOSType": "VMware (ESXi)", "manageSanStorage": False,"volumeAttachments": []},
        "managementProcessor": { "reapplyState": None, "manageMp": True,
                                 "mpSettings": [
                                     {"settingType": "LocalAccounts",
                                      "args": {"localAccounts" : [
                                                   {
                                                        "userName": "******",
                                                        "displayName": "test",
                                                        "password": "******",
                                                        "hostBIOSConfigPriv": False,
                                                        "hostNICConfigPriv": False,
                                                        "hostStorageConfigPriv": False,
                                                        "loginPriv": True,
                                                        "remoteConsolePriv": True,
                                                        "systemRecoveryConfigPriv": False,
                                                        "userConfigPriv": False,
                                                        "virtualMediaPriv": True,
                                                        "virtualPowerAndResetPriv": True,
                                                        "iLOConfigPriv": False,
                                                    },
                                                ]},
                                     },
                                 ]},
        }
    :param ilo:
    :param username:
    :param password:
    """
    if isinstance(profile, dict) and re.search('ServerProfile', profile['type']):
        BuiltIn().log('Checking mpsettings local accounts for profile %s' % profile['name'], console=True)
        if 'managementProcessor' in profile.keys():
            if 'mpSettings' in profile['managementProcessor'].keys():
                if isinstance(profile['managementProcessor']['mpSettings'], list) and len(profile['managementProcessor']['mpSettings']) != 0:
                    for item in profile['managementProcessor']['mpSettings']:
                        if item['settingType'] == 'LocalAccounts':
                            profile_user_accounts = item['args']
                            # remove the password in the profile user since Redfish doesn't return it
                            for profile_user in profile_user_accounts['localAccounts']:
                                profile_user.pop('password', 0)
                            ris_user_accounts = get_ris_local_user_accounts(ilo, username, password)
                            logger._debug("The profile user accounts: %s" % (profile_user_accounts))
                            logger._debug("The iLO user accounts: %s" % (ris_user_accounts))
                            verify = fusion_lib.fusion_api_validate_response_follow(profile_user_accounts, ris_user_accounts, wordy=True)
                            BuiltIn().should_be_equal(verify, True, msg=('Server profile mpsettings local accounts should match RIS for profile %s') % profile['name'])
                            BuiltIn().log('PASS: Server profile mpsettings local accounts should match RIS for profile %s' % profile['name'], console=True)
                else:
                    raise AssertionError("mpSettings in profile %s is an empty list" % profile['name'])
            else:
                raise AssertionError("mpSettings not defined in profile %s" % profile['name'])
        else:
            raise AssertionError("managementProcessor not defined in profile %s" % profile['name'])
    else:
        raise AssertionError("The argument profile %s is not profile DTO" % profile['name'])
 def _request(self,
              op,
              uri,
              headers=None,
              data=None,
              stream=False,
              etag=None,
              if_none_match=None,
              legacy=False,
              xauthtoken=None,
              username=None,
              password=None,
              timeout=180):
     if headers == "no_auth_token":
         headers = {
             'Accept': 'application/json, */*',
             'Accept-language': 'en_US',
             'Content-Type': 'application/json'
         }
     elif headers == "no_auth_token_with_secret":
         headers = {
             'Accept': 'application/json, */*',
             'Accept-language': 'en_US',
             'Content-Type': 'application/json',
             'X-Secret': 'secret'
         }
     elif headers == "Staging":
         headers = {
             'Accept': 'application/json, */*',
             'Accept-language': 'en_US',
             'Content-Type': 'application/octet-stream',
             'X-Stage-Only': 1
         }
         headers['X-Auth-Token'] = self._sessionID
     elif headers == "Dummy1":
         headers = {
             'Accept': 'application/json, */*',
             'Accept-language': 'en_US',
             'Content-Type': 'application/octet-stream',
             'X-Dummy': 1
         }
         headers['X-Auth-Token'] = self._sessionID
     elif headers == "Dummy2":
         headers = {
             'Accept': 'application/json, */*',
             'Accept-language': 'en_US',
             'Content-Type': 'application/octet-stream',
             'X-Dummy': 1,
             'X-Stage-Only': 1
         }
         headers['X-Auth-Token'] = self._sessionID
     elif headers == "more_than_four_headers":
         headers = {
             'Accept': 'application/json, */*',
             'Accept-language': 'en_US',
             'Content-Type': 'application/octet-stream',
             'X-Dummy1': 1,
             'X-Stage-Only': 1,
             'X-Dummy2': 1,
             'X-Dummy3': 1,
             'X-Dummy4': 1
         }
         headers['X-Auth-Token'] = self._sessionID
     elif headers == "long_headers":
         header_name = "X-Stage"
         padchar = 'A'
         current_length = len(header_name)
         if current_length < 1024:
             for i in range(1024 - current_length):
                 header_name = header_name + padchar
         headers = {
             'Accept': 'application/json, */*',
             'Accept-language': 'en_US',
             'Content-Type': 'application/octet-stream',
             header_name: 1,
             'X-Stage-Only': 1
         }
         headers['X-Auth-Token'] = self._sessionID
     elif headers:
         headers['X-Auth-Token'] = self._sessionID
     else:
         headers = self._headers
     logger._debug('uri %s' % uri)
     logger._debug('base %s' % self._base_url)
     logger._debug('host %s' % self._host)
     uri = self._base_url + self._host + uri
     # Below check for legacy support of some existing calls made to HPCIManager which did not encode the data.
     if isinstance(data, dict):
         data = json.dumps(data)
     try:
         logger._debug(
             '\n%s %s\nRequest Header: %s\nRequest Body: %s\n' %
             (op, uri, pprint.PrettyPrinter().pformat(headers), data))
         resp = self._http.request(op,
                                   uri,
                                   data=data,
                                   headers=headers,
                                   verify=False,
                                   stream=stream,
                                   timeout=timeout)
         logger._debug('\nStatus: %d' % resp.status_code)
         logger._debug('\nResp Header: %s' % resp.headers)
         # Below code for debugging purposes.  Won't work for calls to Diags since that returns raw text instead of json
         # TODO: add condition to check for call to Diags and print raw text instead of json
         # if resp.status_code == 200 and op == 'GET' and stream == False:
         #    logger._debug('\nBody: %s' % resp.json())
     except Exception as e:
         msg = "Exception occurred while attempting to %s: %s" % (op, uri)
         raise Exception(msg, e)
     return resp
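The sentinel header strings above ('no_auth_token', 'Staging', 'Dummy1', ...) select canned header sets, which is mainly useful for negative and staging tests. A speculative sketch of exercising them directly on a logged-in client (the URIs and the firmware blob are placeholders, and calling the private _request is only for illustration):

# Service root fetched without an auth token (placeholder URI)
resp = ilo_client._request('GET', '/redfish/v1/', headers='no_auth_token')
# Stage-only upload; the 'Staging' sentinel also injects the session's X-Auth-Token
resp = ilo_client._request('POST', '/redfish/v1/UpdateService/', headers='Staging',
                           data=firmware_blob, timeout=600)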
Example no. 15
    def _validate_scxml(self):
        '''
        basic validation of the SCXML
        '''
        xml = self.xml

        # Check basic first tags/attributes
        if 'state' not in xml:
            raise AttributeError("No states?")

        if 'initial' not in self.root.attrib:
            raise AttributeError("No initial attribute for scxml")

        # Check and gather states
        states = self._get_verifiable_states()
        if len(states) == 0:
            raise AttributeError("No verifiable states in SCXML")

        # Gather and check transitions for each state
        events = []
        for state in states:
            if 'transition' not in state:
                print "No transitions in state %s. Skipping." % state
                continue

            for trans in state['transition']:
                if 'event' not in trans:
                    raise AttributeError(
                        "missing event in transition for state %s" %
                        state['id'])

                if 'cond' in trans:
                    match = re.search("In\([\"\']\S+[\"\']\)", trans['cond'],
                                      re.IGNORECASE)
                    if match is None:
                        raise AttributeError(
                            "only supporting conditions with In predicates")

                events.append(trans['event'])

        if len(events) == 0:
            raise AttributeError("no transitions with events?")

        # Validate initial
        if 'state' in xml and not any(
                key == self.root.attrib['initial'] for key in xml['state']):
            raise AttributeError(
                "failed to find corresponding state tag for initial state")

        # No state id can be 'state' or 'parallel', that's just confusing
        if ([
                1 for state in states
                if (re.search(r"(state|parallel)", state['id'], re.IGNORECASE))
        ]):
            raise NameError("don't make a state id 'state' or 'parallel'")

        # Also can't contain a space
        if ([
                1 for state in states
                if (re.search(r"\s", state['id'], re.IGNORECASE))
        ]):
            raise NameError("state or parallel id can't contain a space")

        # If running in debug mode, skip verification of expected keywords
        if self.debug:
            logging._log_to_console("Skipping expected keyword verification")
            return

        # Skip Keyword validation if specified
        if self.skip_keyword_validation:
            logging._debug("Skipping Keyword Validation.")
            return

        # Validating keywords
        for state in states:
            self.built_in.keyword_should_exist(
                state['id'], "Keyword for state '%s' not found" % state['id'])
        for event in events:
            self.built_in.keyword_should_exist(
                event, "Keyword for event '%s' not found" % event)

        pass
Example no. 16
def download_file(link_locator, destination_folder=None):
    """Download a file from a link URL
        Arguments:
            link_locator - the locator of the link which should contain an href attribute
            destination_folder - the optional destination location of the downloaded file
    """
    # get selenium2library reference
    s2l = get_s2l()

    # get the download URL from the link
    url = s2l._element_find(link_locator, True, True).get_attribute("href")

    # if the folder is not specified, default to the current execution
    # directory
    if destination_folder is None:
        destination_folder = test_data.get_variable("EXECDIR")

    # create the destination path if it doesn't exist
    if not os.path.exists(destination_folder):
        os.makedirs(destination_folder)

    # obtain the cookies and session ID from Selenium, copy the cookies into a
    # dict, and issue the request
    cookies = {}
    all_cookies = s2l._cache.current.get_cookies()
    for s_cookie in all_cookies:
        cookies[s_cookie["name"]] = s_cookie["value"]

    # perform the request
    r = requests.get(url,
                     cookies=cookies,
                     headers={'auth': cookies['token']},
                     verify=False)

    # validate the status
    if r.status_code != 200:
        raise FatalError("File download status code was '{0}'".format(
            r.status_code))

    # get file info
    file_length = long(r.headers['content-length'])
    filename = r.headers['content-disposition'].split("; ")[1].split(
        "=")[1].strip('"')
    local_path = join(destination_folder, filename)

    # if the file exists, then append (1)
    while os.path.exists(local_path):
        tok = list(os.path.splitext(local_path))
        tok[0] += "(1)"
        local_path = join(destination_folder, "".join(tok))

    # download the file
    logger._debug("Downloading '{0}' to {1} - ({2} bytes)".format(
        filename, local_path, file_length))
    try:
        with open(local_path, 'wb') as lf:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    lf.write(chunk)
                    lf.flush()

        # validate the file size
        local_file_length = os.path.getsize(local_path)
        if local_file_length != file_length:
            raise FatalError(
                "Download file size doesn't match.  Should be {0}.  Was {1}".
                format(file_length, local_file_length))
        else:
            logger._debug("Successfully downloaded {0} ({1} bytes)".format(
                local_path, local_file_length))
    except FatalError:
        raise
    except Exception as e:
        raise FatalError("Failed to download '{0}' to '{1}': {2}".format(
            filename, local_path, e))
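A usage sketch for download_file; the link locator and download directory are placeholders, and it assumes an active Selenium2Library session whose cookies include the 'token' used for the auth header:

download_file("css=a#support-dump-link", destination_folder="/tmp/downloads")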
Example no. 17
    def _parse_xml(self, parent_element, path=None):
        '''
        Recursively parse the SCXML element tree into nested state dicts
        '''
        logging._debug("Calling _parse_xml")

        if path is None:
            path = ""

        data = {}
        # Store parent_element information
        if ('parallel' in parent_element.tag or 'state' in parent_element.tag):
            xtype = 'parallel' if 'parallel' in parent_element.tag else 'state'

            data['id'] = parent_element.attrib['id']

            if 'initial' in parent_element.attrib:
                data['initial'] = parent_element.attrib['initial']

            path += " " if path is not "" else ""  # Avoids leading spaces
            path += xtype  # Add type to hierarchy
            data['-hierarchy'] = path.split(' ')  # Store hierarchy
            path += " " + parent_element.attrib['id']  # Add id to hierarchy

        if parent_element.items():
            dict(parent_element.items())

        states = {}
        transitions = []
        for element in parent_element:
            # print element.tag

            # Ignore Top-most level
            if ('}scxml' in parent_element.tag):
                return self._parse_xml(element, path)

            # Parallel or Serial States
            elif ('parallel' in element.tag or 'state' in element.tag):
                if 'state' not in data:
                    data['state'] = []
                states[element.attrib['id']] = self._parse_xml(element, path)
                data['state'] = states

            # Transitions
            elif 'transition' in element.tag:
                transition = {}

                # Store transition information
                transition['event'] = element.attrib['event']
                transition['target'] = element.attrib['target']

                if 'cond' in element.attrib:
                    transition['cond'] = element.attrib['cond']
                    # Replace multiple spaces with single
                    r = re.compile('\s+')
                    transition['cond'] = r.sub(' ', transition['cond'])

                # TODO: Currently does not store conditions
                # Add transition into transitions array
                transitions.append(transition)
                # Store transitions in data dictionary
                data['transition'] = transitions

        return data
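A minimal SCXML fragment with the shape this parser and _validate_scxml expect: an initial attribute on the root, state elements with id, and transition elements carrying event and target (the ids and events here are invented):

SAMPLE_SCXML = """
<scxml xmlns="http://www.w3.org/2005/07/scxml" initial="Idle">
  <state id="Idle">
    <transition event="power_on" target="Running"/>
  </state>
  <state id="Running">
    <transition event="power_off" target="Idle"/>
  </state>
</scxml>
"""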
def WFT2_Python_Helper(tasks,
                       timeout=60,
                       interval=2,
                       errorMessage="None Expected",
                       PASS="******",
                       BREAK_LOOP_IF="((?i)Error|Terminated)",
                       VERBOSE=False,
                       **kwargs):
    """
    #   Argument ${tasks} can be a TaskResource dict or a dict that contains a ['headers']['location'] value to a task uri,
    #   or a list of such.  If a list and any task fails, the keyword will fail and remaining tasks are not verified.
    #
    #   Supports BREAK_LOOP_IF and Error Message override of failure for negative testing.  BREAK_LOOP_IF is used to
    #    terminate the wait loop prior to timeout.
    #
    #   This keyword also evaluates ${tasks} to see if an error occurred and thus ${tasks} isn't a task resource.
    #
    #   Defaults: ${timeout}=60  ${interval}=2  ${errorMessage}=None Expected  ${PASS}=((?i)Completed|Warning)  ${BREAK_LOOP_IF}=((?i)Error|Terminated)  ${VERBOSE}=False
    #
    #   Timeout and interval are in seconds, though both can be entered in minutes as in 10m.
    #
    #   If ${errorMessage} is not passed in, error messages will not be validated for negative testing.
    #   If ${errorMessage} is passed in and the actual errorMessage value contains ${variables}, then you must supply those variables values.
    #   See errorMessages.py for additional information regarding errorMessages.
    #
    #   Usage examples:
    #   Wait For Task2	${tasks}	   timeout=60	interval=5    pass=Error  errorMessage=ethernet_exists    name=Net777
    #   Wait For Task2    ${tasks}    ${PASS}=((?i)Running|Starting)    ${BREAK_LOOP_IF}=((?i)Error|Terminated)   ${VERBOSE}=True
    #   Wait For Task2    ${tasks}    ${PASS}=((?i)Running|Starting)    ${BREAK_LOOP_IF}=((?i)Error|Terminated)   ${VERBOSE}=True
    #
    #  Note:  By default this WFT2 helper aborts on task failure (if not expected with correct errorMessage).
    # To ignore errors and process all tasks supply a command line switch:
    # pybot -v APPLIANCE_IP:16.114.221.186 -v WFT2_CONTINUE_ON_ERROR:True
    # someTest.robot
    """

    if BuiltIn().get_variable_value("${WFT2_CONTINUE_ON_ERROR}"):
        WFT2_CONTINUE_ON_ERROR = BuiltIn().replace_variables(
            "${WFT2_CONTINUE_ON_ERROR}")
    else:
        WFT2_CONTINUE_ON_ERROR = False

    failingTasks = []
    if WFT2_CONTINUE_ON_ERROR:
        logger._log_to_console_and_log_file(
            "Will process all tasks and continue on errors.")

    # Convert timeout to seconds if passed in as minutes: 10m.
    if re.search(r'm', str(timeout), re.I):
        logger._log_to_console_and_log_file(
            "Timeout in minutes.  Convert to seconds.")
        timeout = int(float(re.sub(r'm.*$', '', str(timeout), flags=re.I)) * 60)

    if re.search(r'm', str(interval), re.I):
        logger._log_to_console_and_log_file(
            "Interval in minutes.  Convert to seconds.")
        interval = int(float(re.sub(r'm.*$', '', str(interval), flags=re.I)) * 60)
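    # Illustration: timeout='10m' becomes 600 seconds and interval='1m' becomes 60;
    # plain numeric values (e.g. the defaults 60 and 2) pass through unchanged.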

    # verify that tasks is a dict or list of dict.  If not, fail and inform
    # user of this requirement.
    if isinstance(tasks, dict):
        logger._log_to_console_and_log_file(
            "tasks is a dict.  Converting it to a single-item list.")
        tasks = [tasks]
    elif isinstance(tasks, list) and isinstance(tasks[0], dict):
        logger._log_to_console_and_log_file("tasks is a list with %d tasks." %
                                            len(tasks))
    else:
        raise AssertionError(
            "You must pass in a dict or a list of dicts.  Should WFT2 be called?"
        )
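    # Accepted shapes (values here are illustrative, not real resources):
    #   a response dict such as {'headers': {'location': '/rest/tasks/<id>'}},
    #   a task resource dict containing 'uri' and 'taskState',
    #   or a list of either.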

    if VERBOSE:
        logger._log_to_console_and_log_file("task: %s" % tasks)

    # Set up expected error for negative tests.
    if errorMessage == "None Expected":
        expected_error_message = "None Expected"
    else:
        # Get extrapolated expected error message.  Use RG Keyword for now
        expected_error_message = BuiltIn().run_keyword("Get Expected Error",
                                                       errorMessage, kwargs)

    logger._log_to_console_and_log_file("errorMessage: %s" % errorMessage)
    logger._log_to_console_and_log_file("timeout (seconds): %s interval: %s" %
                                        (timeout, interval))
    logger._log_to_console_and_log_file("PASS: %s, BREAK_LOOP_IF: %s" %
                                        (PASS, BREAK_LOOP_IF))
    logger._log_to_console_and_log_file("kwargs: %s" % kwargs)
    logger._log_to_console_and_log_file("expected_error_message: %s" %
                                        expected_error_message)
    logger._log_to_console_and_log_file("WFT2_CONTINUE_ON_ERROR: %s" %
                                        WFT2_CONTINUE_ON_ERROR)

    # If a location header is available use that; otherwise get the task uri and do a GET of that task.
    # This way the user can pass in a taskResource or a dict with headers and a
    # location header.
    for task in tasks:
        location = None
        task_uri = None
        taskData = {}
        if ('headers' in task) and ('location' in task['headers']):
            location = task['headers']['location']
        if 'uri' in task:
            task_uri = task['uri']

        if location is not None:
            logger._log_to_console_and_log_file(
                "Getting Task using location: %s" % location)
            task = fusion_lib.fusion_api_get_task(uri=location)
        elif task_uri is not None:
            logger._log_to_console_and_log_file(
                "Getting Task using task_uri: %s" % task_uri)
            task = fusion_lib.fusion_api_get_task(uri=task_uri)

        # Determine if ${task} contains taskState thus is a taskResource.
        # If not, check the error message and either fail or pass if it was an
        # expected error.
        resourceName = None
        resourceType = None
        if 'associatedResource' in task:
            resourceName = task['associatedResource']['resourceName']
            resourceType = task['associatedResource']['resourceCategory']
        else:
            print "NO ASSOCIATED RESOURCE"
        if 'taskState' in task:
            taskState = task['taskState']
            logger._log_to_console_and_log_file("taskState: %s" % taskState)
            task_uri = task['uri']
        else:
            logger._log_to_console_and_log_file(
                "taskState not obtained, will check error message")

            if 'message' in task:
                actMessage = task['message']
                logger._log_to_console_and_log_file("actMessage: %s" %
                                                    actMessage)

                if re.search(expected_error_message, actMessage):
                    logger.warn(
                        "actMessage matches the expected error message.")
                    continue
                else:
                    if WFT2_CONTINUE_ON_ERROR:
                        # failingTasks.append(task['uri'])
                        taskData['uri'] = task['uri']
                        taskData['name'] = resourceName
                        failingTasks.append(taskData)
                        logger.warn(
                            "actMessage does not match the expected error message.")
                        continue
                    else:
                        logger.warn(
                            "{0} '{1}' Failed: actMessage not matching expected error message."
                            .format(resourceType, resourceName))
                        out_string = "Task Message:{0} \nRecommendedActions:{1} \nerrorCode:{2} ".format(
                            task['message'], task['recommendedActions'],
                            task['errorCode'])
                        raise AssertionError(
                            "actMessage not matching expected error message.\n{}"
                            .format(out_string))
            else:
                if WFT2_CONTINUE_ON_ERROR:
                    taskData['uri'] = task['uri']
                    taskData['name'] = resourceName
                    failingTasks.append(taskData)
                    logger.warn(
                        "Unable to obtain taskState and no error message.  Should WFT2 be called?"
                    )
                    continue
                else:
                    raise AssertionError(
                        "Unable to obtain taskState and no error message.  Should WFT2 be called?"
                    )

        # set up the countdown timer
        countDownTo = 0
        countDownFrom = int(timeout)
        logger._log_to_console_and_log_file(
            "Countdown from %s to %s by interval of %s seconds." %
            (countDownFrom, countDownTo, interval))
        interval = int(interval)

        # spin on the task, checking taskState each pass
        passed = False
        break_loop = False
        timed_out = False
        while True:
            # Exit loop if taskState PASS
            if re.search(PASS, taskState):
                logger._log_to_console_and_log_file(
                    "taskState reached expected argument 'PASS' state: %s." %
                    taskState)
                passed = True
                break

            # Exit loop if taskState BREAK_LOOP_IF
            if re.search(BREAK_LOOP_IF, taskState):
                logger._log_to_console_and_log_file(
                    "Break Loop, taskState failed: %s." % taskState)
                break_loop = True
                break

            # Exit loop if timedout
            if countDownFrom <= countDownTo:
                logger._log_to_console_and_log_file(
                    "Task loop timed out after %s seconds" % timeout)
                timed_out = True
                break
            time.sleep(interval)
            countDownFrom -= interval

            # get the task again
            task = fusion_lib.fusion_api_get_task(uri=task_uri)
            if 'taskState' in task:
                taskState = task['taskState']
            else:
                if WFT2_CONTINUE_ON_ERROR:
                    logger.warn(
                        "taskState not found in last GET on task uri: %s" %
                        task_uri)
                else:
                    logger.warn(
                        "{0} '{1}' Failed: taskState not found in last GET taskUri"
                        .format(resourceType, resourceName))
                    raise AssertionError(
                        "taskState not found in last GET on task uri: %s" %
                        task_uri)

            logger._log_to_console_and_log_file("Now at %s, taskState: %s" %
                                                (countDownFrom, taskState))

        # If taskState reached the expected PASS state (could be Completed, could be Error), evaluate taskErrors.
        # If there are taskErrors and the user didn't specify an errorMessage, fail.
        # If there are no taskErrors and the user did specify an error message, fail.
        # Otherwise pass.
        check_expected_error = False
        if passed:
            if ('taskErrors' in task) and (len(task['taskErrors']) > 0):
                if expected_error_message == 'None Expected':
                    if taskState == 'Completed':
                        actMessage = task['taskErrors'][0]['message']
                        if WFT2_CONTINUE_ON_ERROR:
                            # failingTasks.append(task['uri'])
                            taskData['uri'] = task['uri']
                            taskData['name'] = resourceName
                            failingTasks.append(taskData)
                            logger.warn(
                                "Task reached expected state but didn't expect taskError: %s"
                                % actMessage)
                        else:
                            raise AssertionError(
                                "{0} '{1}' Failed: Task reached expected state but didn't expect taskError- '{2}'"
                                .format(resourceType, resourceName,
                                        actMessage))
                    else:
                        logger._log_to_console_and_log_file(
                            "taskError ignored as taskState is not 'Completed' and user did not specify an errorMessage."
                        )
                else:
                    check_expected_error = True

            elif ('taskErrors' not in task) and (expected_error_message
                                                 != 'None Expected'):
                if WFT2_CONTINUE_ON_ERROR:
                    # failingTasks.append(task['uri'])
                    taskData['uri'] = task['uri']
                    taskData['name'] = resourceName
                    failingTasks.append(taskData)
                    logger.warn(
                        "Task reached expected state but no expected taskError returned.  Expected: %s"
                        % expected_error_message)
                else:
                    logger.warn(
                        "{0} '{1}' Failed: No expected errorMessage returned".
                        format(resourceType, resourceName))
                    raise AssertionError(
                        "Task reached expected state but no expected taskError returned.  Expected: %s"
                        % expected_error_message)
            else:
                if taskState != 'Completed':
                    logger._log_to_console_and_log_file(
                        "Unexpected taskErrors are ignored for non 'Completed' taskState."
                    )
                continue

        if timed_out:
            if WFT2_CONTINUE_ON_ERROR:
                # failingTasks.append(task['uri'])
                taskData['uri'] = task['uri']
                taskData['name'] = resourceName
                failingTasks.append(taskData)
                logger.warn("{0} '{1}' Failed: Task timed out.".format(
                    resourceType, resourceName))
            else:
                logger.warn("{0} '{1}' Failed: Task timed out".format(
                    resourceType, resourceName))
                raise AssertionError("Task timed out.")

        #  If the user-supplied taskState was met (Error, for example) and an errorMessage was passed in,
        #  we get here.  Use this same code to test the error message.
        if break_loop or check_expected_error:
            if 'taskErrors' in task:
                task_tree = get_task_tree(task_uri)
                nodes = []
                task_nodes = get_task_tree_nodes(task_tree,
                                                 nodes,
                                                 taskState=PASS)
                logger._debug("The nodes returned are %s" % task_nodes)
                if check_expected_error:
                    found = check_task_error_message(
                        task_nodes,
                        taskState=PASS,
                        errorMessage=expected_error_message)
                    if found:
                        logger._log_to_console_and_log_file(
                            "Error message '%s' found" %
                            expected_error_message)
                    else:
                        if WFT2_CONTINUE_ON_ERROR:
                            # failingTasks.append(task['uri'])
                            taskData['uri'] = task['uri']
                            taskData['name'] = resourceName
                            failingTasks.append(taskData)
                            logger.warn(
                                "{0} '{1}' Failed: Error message '{2}' not found"
                                .format(resourceType, resourceName,
                                        expected_error_message))
                        else:
                            logger.warn(
                                "{0} '{1}' Failed: Error message was not found".
                                format(resourceType, resourceName))
                            task_msg = get_task_error_message(task_nodes)
                            out_str = "{0} not found.\nActual message is: {1}".format(
                                expected_error_message, task_msg)
                            raise AssertionError(
                                "Error message : {}".format(out_str))
                else:
                    if 'message' in task:
                        actMessage = task['message']
                        logger._log_to_console_and_log_file("actMessage: %s" %
                                                            actMessage)

                    if ('taskErrors' in task) and ('message'
                                                   in task['taskErrors'][0]):
                        actMessage = task['taskErrors'][0]['message']
                        logger._log_to_console_and_log_file("actMessage: %s" %
                                                            actMessage)

                    if WFT2_CONTINUE_ON_ERROR:
                        # failingTasks.append(task['uri'])
                        taskData['uri'] = task['uri']
                        taskData['name'] = resourceName
                        failingTasks.append(taskData)
                        logger.warn(
                            "{0} '{1}' Failed: Task Error but no expected error was specified."
                            .format(resourceType, resourceName))
                    else:
                        logger.warn(
                            "{0} '{1}' Failed: Task Error but no expected error was specified."
                            .format(resourceType, resourceName))
                        logger.warn(
                            "Actual error message: {}".format(actMessage))
                        raise AssertionError(
                            "Task Error but no expected error was specified.")
            else:
                if WFT2_CONTINUE_ON_ERROR:
                    # failingTasks.append(task['uri'])
                    taskData['uri'] = task['uri']
                    taskData['name'] = resourceName
                    failingTasks.append(taskData)
                    logger.warn(
                        "Task broke loop, but no taskError was returned.")
                else:
                    raise AssertionError(
                        "{0} '{1}' Failed: Task broke loop, but no taskError was returned."
                        .format(resourceType, resourceName))

    if WFT2_CONTINUE_ON_ERROR and len(failingTasks):
        logger._log_to_console_and_log_file(failingTasks)
        for task in failingTasks:
            logger._log_to_console_and_log_file("Failed {0} name: {1}".format(
                resourceType, task['name']))
        raise AssertionError("There were %s failing tasks." %
                             len(failingTasks))
    return True
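# Minimal usage sketch for WFT2_Python_Helper (the task URI below is a
# placeholder and the surrounding test flow is an assumption, not part of this
# module):
#
#     resp = fusion_lib.fusion_api_get_task(uri='/rest/tasks/<task-id>')
#     WFT2_Python_Helper(resp, timeout='5m', interval=5)
#
# Negative-test sketch, matching the docstring example: expect the task to end
# in Error with a known errorMessage template whose ${variables} are supplied
# as kwargs:
#
#     WFT2_Python_Helper(resp, PASS='((?i)Error)',
#                        errorMessage='ethernet_exists', name='Net777')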
Exemplo n.º 19
0
    def run_random_path(self,
                        type="online",
                        file=None,
                        transitions=None,
                        minutes=None):
        '''
        Run a random path with set transitions and/or minutes
        usage: run_random_path(transitions, minutes, type, file)
         transitions -- maximum number of transitions to run
         minutes -- maximum number of minutes to run
         type -- online or offline (defaults to online)
         file -- file to log transitions
        '''
        rc = True

        # Validate arguments
        if minutes is None and transitions is None:
            raise AttributeError("run_random_path requires 'minutes' or \
                            'transitions' argument")

        if type.lower() != "offline" and type.lower() != "online":
            raise AttributeError(
                "run_random_path requires 'type' argument set to \
                            offline or online. (%s)" % type)

        if type.lower() == "offline" and file is None:
            raise AttributeError("run_random_path requires 'file' arg for \
                            offline run")

        if type.lower() == "offline" and minutes is not None:
            raise AttributeError("can't use 'minutes' constraint in offline \
                                test generation")
        if not self.scxml:
            raise AttributeError("can't run_random_path without SCXML passed \
                                to the constructor")

        # Open file handle if needed
        fh = None
        if file is not None:
            fh = open(file, "w")

        transitions_taken = 0
        start = time.time()

        # Verify initial state(s)
        curr_states = self._get_full_state(self.root.attrib['initial'])

        if file is not None:
            for state in curr_states:
                fh.write("state: " + state['id'] + "\n")

        if string.lower(type) == "online":
            for state in curr_states:
                rc = self._verify_state(state['id'])
                if not rc:
                    return rc

        while True:
            # Check for exit conditions
            if minutes is not None:
                if (time.time() - start) > (int(minutes) * 60):
                    break
            if transitions is not None:
                transitions_taken += 1
                if transitions_taken > int(transitions):
                    break

            # get list of possible transitions
            available_transitions = []
            for state in curr_states:
                for transition in state['transition']:
                    if self._cond_met(transition, curr_states):
                        available_transitions.append(transition)

            # Pick a random transition
            transition = random.choice(available_transitions)
            curr_states = self._get_next_state(curr_states, transition)

            # Log choice to file if needed
            if string.lower(type) == "offline":
                logging._debug("Writing event '%s' to file." %
                               transition['event'])
                fh.write("event: " + transition['event'] + "\n")
                for state in curr_states:
                    logging._debug("Writing state '%s' to file." % state['id'])
                    fh.write("state: " + state['id'] + "\n")
            if string.lower(type) == "online":
                # Execute event for the transition
                rc = self._verify_event(transition['event'])
                if not rc:
                    return rc
                # Verify resulting states from transition
                for state in curr_states:
                    rc = self._verify_state(state['id'])
                    if not rc:
                        return rc
        return rc
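# Minimal usage sketch (the wrapper class and its constructor are not shown in
# this example, so the names below are assumptions for illustration only):
#
#     model = ScxmlModel('power_states.scxml')   # hypothetical class holding self.scxml / self.root
#     # offline: write a random path of up to 50 transitions to a log file
#     model.run_random_path(type='offline', file='random_path.log', transitions=50)
#     # online: drive and verify the system under test for at most 10 minutes
#     model.run_random_path(type='online', minutes=10)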