Example #1
    def validate_json_value(self, jmes_path, expected, case_sensitive=False):
        """Validate a value in the response JSON referred to by the JMES Path
        
        Args:
            jmes_path (str): JMES Path to the actual value in the response JSON
            expected (Any): Expected value
        """
        logger.log_info(
            TAG, '{}: Starting validation for JSON Comparison'.format(
                self.test_step.name))
        actual = utils.resolve_jmes(
            jmes_path, json.loads(self.test_step.response.response_body))

        if not case_sensitive and isinstance(actual, str) and isinstance(
                expected, str):
            actual = actual.lower()
            expected = expected.lower()

        if actual == expected:
            return True
        else:
            self.message += 'JSON value comparison failed for field {}. Expected: {}, Actual: {}. '.format(
                jmes_path, expected, actual)
            self.status = TestBase.ValidationStatus.FAILED
            return False
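For context, here is a minimal, standalone sketch of how a JMES Path query pulls a value out of a response body using the jmespath package directly; utils.resolve_jmes above is assumed to wrap a call like this.

import json
import jmespath

# Standalone illustration only; the project's utils.resolve_jmes is assumed
# to wrap something similar to jmespath.search.
response_body = '{"data": {"user": {"name": "Alice"}}}'
actual = jmespath.search('data.user.name', json.loads(response_body))
print(actual)  # Alice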
Example #2
    def execute(self):
        '''
        Sends the HTTP request and stores the response
        '''
        logger.log_info(TAG, '{} - starting execution'.format(self.name))
        start_time = datetime.now()

        if self.action is RequestMethod.GET:
            r = requests.get(self.target, headers=self.headers)
        elif self.action is RequestMethod.POST:
            r = requests.post(self.target,
                              json=self.body,
                              headers=self.headers)
        elif self.action is RequestMethod.PUT:
            r = requests.put(self.target, json=self.body, headers=self.headers)
        elif self.action is RequestMethod.DELETE:
            r = requests.delete(self.target, headers=self.headers)
        else:
            raise NotImplementedError('ActionType unknown for ApiStep')

        # r.raise_for_status()
        response_tuple = namedtuple('response_tuple',
                                    ['response_code', 'response_body'])
        self.response = response_tuple(r.status_code, r.text)

        end_time = datetime.now()
        self.duration = end_time - start_time
        logger.log_info(
            TAG, '{} - finished execution in {} seconds'.format(
                self.name, self.duration.total_seconds()))
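As a design note, the if/elif chain above can also be written as a dispatch table keyed by the request method. The sketch below is a hypothetical alternative, not the project's implementation; RequestMethod is redefined locally so the snippet stands on its own.

from enum import Enum, auto

import requests

class RequestMethod(Enum):
    GET = auto()
    POST = auto()
    PUT = auto()
    DELETE = auto()

# Hypothetical dispatch table; body-carrying methods are flagged so json=
# is only passed where it makes sense.
DISPATCH = {
    RequestMethod.GET: (requests.get, False),
    RequestMethod.POST: (requests.post, True),
    RequestMethod.PUT: (requests.put, True),
    RequestMethod.DELETE: (requests.delete, False),
}

def send(action, target, headers=None, body=None):
    func, has_body = DISPATCH[action]
    kwargs = {'headers': headers}
    if has_body:
        kwargs['json'] = body
    return func(target, **kwargs)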
Example #3
File: layers.py Project: mrajp7/gtf
 def tearDown(cls):
     """
     The tearDown should always be a classmethod
     In the tearDown, we clean up the resources used in the tests
     In this case, we close the log file
     """
     logger.log_info(TAG, 'BaseLayer tearDown')
     logger.close_log_file()
Example #4
    def init_config(cls):
        cls.config = ConfigObj(cls.config_file)
        try:
            # Read a value from the config file
            cls.VALUE = cls.config['default']['some_key']

            # Initialize the endpoints reader here because it is needed across the project
            cls.endpoints_reader = ExcelReader(cls.ENDPOINTS_FILE)
        except KeyError:
            logger.log_exception(TAG)
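The lookup above implies an INI-style file with a [default] section. Below is a minimal, self-contained ConfigObj sketch; the section and key names mirror the code above, while the value is made up.

from configobj import ConfigObj

# ConfigObj also accepts a list of lines, which keeps this sketch self-contained.
config = ConfigObj([
    '[default]',
    'some_key = some_value',
])
print(config['default']['some_key'])  # some_value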
Example #5
File: utils.py Project: mrajp7/gtf
def create_time_stamped_dir(path='', prefix=''):
    '''
    Creates a directory with a timestamp in the given path

    Args:
        path (str): Parent directory in which to create the new directory
        prefix (str): A prefix to add before the timestamp in the directory name

    Returns:
        str: Path of the created directory
    '''
    logger.log_info(TAG, 'Creating time-stamped directory')
    dir_path = os.path.join(path,
                            prefix + datetime.now().strftime('_%Y-%m-%d_%X'))
    os.makedirs(dir_path)
    logger.log_info(TAG, 'Created ' + dir_path)
    return dir_path
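For illustration, the directory name built by the helper above looks like the example below; note that %X is locale-dependent and usually contains colons, which are not valid in Windows file names.

import os
from datetime import datetime

# What create_time_stamped_dir builds, assuming path='./output' and prefix='test_run'.
name = 'test_run' + datetime.now().strftime('_%Y-%m-%d_%X')
print(os.path.join('./output', name))  # e.g. ./output/test_run_2024-05-01_14:03:59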
Example #6
File: layers.py Project: mrajp7/gtf
 def testSetUp(cls, test):
     """
     A testSetUp is run before EACH test that uses this layer
     
     Args:
         test (test class): The test class that is running
     """
     logger.log_info(TAG, 'DerivedLayer testSetUp')
     if getattr(test, 'needs_prep', False):
         logger.log_info(
             TAG,
             'Doing some special preparation for tests where the needs_prep attribute is set'
         )
Example #7
 def validate_response_code(self, expected_codes=[200]):
     """Validate response status code for the API step
     
     Args:
         expected_codes (list, optional): A list of accepted response codes for the request. Defaults to [200].
     """
     logger.log_info(
         TAG, '{}: Starting validation for response code'.format(
             self.test_step.name))
     if self.test_step.response.response_code in expected_codes:
         return True
     else:
         self.message += 'Response code validation failed. Expected: {}, Actual: {}. '.format(
             expected_codes, self.test_step.response.response_code)
         self.status = TestBase.ValidationStatus.FAILED
         return False
Example #8
 def validate_jsons_for_equality(self, expected, actual):
     """Validate 2 JSON objects to see if they are equal
     
     Args:
         expected (dict): Expected JSON value
         actual (dict): Actual JSON value to compare
     """
     logger.log_info(
         TAG, '{}: Starting validation for JSON Comparison'.format(
             self.test_step.name))
     dff = jsondiff.diff(expected, actual)
     # jsondiff returns an empty dict when there are no differences
     if not dff:
         return True
     else:
         self.message += 'JSON comparison failed. '  # Expected and actual omitted because they could be too large
         self.status = TestBase.ValidationStatus.FAILED
         return False
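A quick standalone look at what jsondiff.diff returns, which is why the check above tests for an empty diff rather than None:

import jsondiff

# With the default syntax, equal documents produce an empty diff,
# and a changed value is reported under its key.
print(jsondiff.diff({'a': 1}, {'a': 1}))  # {}
print(jsondiff.diff({'a': 1}, {'a': 2}))  # e.g. {'a': 2}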
Example #9
File: layers.py Project: mrajp7/gtf
    def setUp(cls):
        """
        The setUp method should always be a classmethod.
        We should create an output folder where the report and the logs will be saved.
        We should initialize the logger here.
        We should initialize the config reader, if any.
        """
        logger.log_info(TAG, 'BaseLayer setUp')

        # Initialize and read the configuration
        TestConfig.init_config()

        # Create a test run dir
        TestConfig.output_dir = utils.create_time_stamped_dir(
            path='./output', prefix='test_run')
        # Set up the logger
        logger.init_log_to_file(TestConfig.output_dir)
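Putting the layer hooks together, here is a minimal, hypothetical skeleton of the pattern these examples follow; the class name and log strings are illustrative, not taken from the project.

# Hypothetical layer skeleton; every hook is a classmethod, as the docstrings above require.
class ExampleLayer:

    @classmethod
    def setUp(cls):
        # Runs once before the tests that use this layer
        print('ExampleLayer setUp')

    @classmethod
    def testSetUp(cls, test):
        # Runs before EACH test that uses this layer
        print('ExampleLayer testSetUp for', test)

    @classmethod
    def tearDown(cls):
        # Runs once after the tests that use this layer
        print('ExampleLayer tearDown')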
Example #10
 def save_response_to_file(self, file_path):
     """
     Saves the response of the API request to a file
     
     Args:
         file_path (str): File path to where the response should be saved
     """
     try:
         with open(file_path, 'w') as outf:
             json.dump(json.loads(self.response.response_body),
                       outf,
                       indent=4)
     except IOError:
         logger.log_error(
             TAG,
             'Could not save the response to a file. Check that the path exists and that you have write permission'
         )
         logger.log_exception(TAG)
     except json.decoder.JSONDecodeError:
         logger.log_warn(
             TAG,
             'Response not in JSON format:\n' + self.response.response_body)
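The same save-or-warn pattern in isolation, with a made-up body so it runs on its own:

import json

body = '{"status": "ok"}'  # stand-in for an API response body
try:
    with open('response.json', 'w') as outf:
        json.dump(json.loads(body), outf, indent=4)
except json.JSONDecodeError:
    print('Response not in JSON format:\n' + body)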
Example #11
File: layers.py Project: mrajp7/gtf
 def tearDown(cls):
     logger.log_info(
         TAG, 'DerivedLayer tearDown - This runs before BaseLayer tearDown')
Example #12
File: layers.py Project: mrajp7/gtf
 def setUp(cls):
     logger.log_info(TAG, 'DerivedLayer setUp - Doing nothing here')