Example #1
 def run_tests(self) -> bool:
     try:
         self._compose.up()
         variables = {}
         if self.inventory is not None:
             variables = read_source_file(self.inventory)
             variables['INVENTORY'] = get_filename(self.inventory)
             variables = try_get_object(fill_template_str(
                 variables, {}))  # fill env vars
         variables['CURRENT_DIR'] = self.path
         test_files = get_files(self.tests_path)
         results = []
         for file in test_files:
             self.all_includes = []
             try:
                 variables['TEST_NAME'] = file
                 test = self.prepare_test(file, variables)
                 test.run()
                 results.append(True)
                 info('Test ' + file + ' passed.')
             except Exception as e:
                 warning('Test ' + file + ' failed: ' + str(e))
                 results.append(False)
         return all(results)
     finally:
         self._compose.down()
Example #2
 def _run_test(self,
               test: Test,
               global_variables: dict,
               output: str = 'full',
               test_type='test') -> bool:
     try:
         self.var_holder.prepare_variables(test, global_variables)
         logger.log_storage.test_start(test.file, test_type=test_type)
         test.check_ignored()
         with OptionalOutput(output == 'limited'):
             test.run()
         info(test_type.capitalize() + ' ' +
              cut_path(self.tests_path, test.file) +
              logger.green(' passed.'))
         logger.log_storage.test_end(test.file, True, test_type=test_type)
         return True
     except SkipException:
         info(test_type.capitalize() + ' ' +
              cut_path(self.tests_path, test.file) +
              logger.yellow(' skipped.'))
         logger.log_storage.test_end(test.file,
                                     True,
                                     end_comment='Skipped',
                                     test_type=test_type)
         return True
     except Exception as e:
         warning(test_type.capitalize() + ' ' +
                 cut_path(self.tests_path, test.file) +
                 logger.red(' failed: ') + str(e))
         debug(traceback.format_exc())
         logger.log_storage.test_end(test.file,
                                     False,
                                     str(e),
                                     test_type=test_type)
         return False
Example #3
def fill_connections(inventory, conf, dialect, fernet_key):
    """
    Populate airflow connections based on catcher's inventory file
    :param inventory: path to inventory file
    :param conf: db configuration
    :param dialect: database dialect (e.g. postgresql)
    :param fernet_key: fernet key used to encrypt connection passwords
    :return:
    """
    inv_dict = file_utils.read_source_file(inventory)
    engine = db_utils.get_engine(conf, dialect)
    with engine.connect() as connection:
        for name, value in inv_dict.items():
            try:
                if isinstance(value, dict) and 'type' in value:
                    if _check_conn_id_exists(name, connection):
                        debug('{} already exists'.format(name))
                        continue
                    query, params = _prepare_connection(value, fernet_key)
                    from sqlalchemy.sql import text
                    connection.execute(text(query), name=name, **params)
                    debug('{} added'.format(name))
                else:
                    debug('{} ignored. No type specified'.format(name))
            except Exception as e:
                warning('Can\'t add {}:{} - {}'.format(name, value, e))
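A minimal usage sketch (all values are illustrative, and the exact shape of conf is an assumption since it is only passed through to db_utils.get_engine):

fill_connections(inventory='inventory/local.yml',                    # hypothetical inventory path
                 conf='airflow:airflow@localhost:5433/airflow',      # hypothetical db configuration
                 dialect='postgresql',
                 fernet_key='<your airflow fernet key>')             # used to encrypt passwords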
Example #4
def run_cmd_simple(cmd: str,
                   variables: dict,
                   env=None,
                   args: List[str] = None,
                   libraries=None) -> Union[dict, str]:
    """
    Run cmd with variables exported into its environment.
    :param cmd: command to run
    :param args: command arguments
    :param variables: variables to export into the environment
    :param env: custom environment
    :param libraries: additional libraries used for source compilation
    :return: output parsed as json (if possible) or plain text
    """
    env = _prepare_env(variables, env=env)
    cmd, cwd = _prepare_cmd(cmd, args, variables, libraries=libraries)
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT,
                         env=env,
                         cwd=cwd)
    if p.wait() == 0:
        out = p.stdout.read().decode()
        debug(out)
        return _parse_output(out)
    else:
        out = p.stdout.read().decode()
        warning(out)
        raise Exception('Execution failed.')
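A minimal usage sketch (the command is illustrative, and it is assumed that _prepare_env exports each variable under its own name):

result = run_cmd_simple('date', {'MY_VAR': 'value'})
# stdout of 'date' is not valid JSON, so result comes back as plain text;
# a command printing JSON would be returned as a parsed dict instead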
Example #5
    def _wait_for_running(url,
                          dag_id,
                          execution_date,
                          timeout,
                          run_id,
                          dialect=None,
                          db_conf=None):
        while True:
            try:
                state = airflow_client.get_run_status(url, dag_id,
                                                      execution_date)
            except OldAirflowVersionException:
                warning(
                    "Your airflow version does not support rest API method for DAG status. Call backend db directly"
                )
                state = airflow_db_client.get_dag_run_by_run_ud(
                    run_id=run_id, conf=db_conf, dialect=dialect)["state"]

            debug(state)
            if state.lower() != 'running':
                return state.lower()
            if timeout > 0:
                sleep(1)
                timeout -= 1
            else:
                raise Exception('Dag {} still running'.format(dag_id))
Example #6
def __env_to_variables(environment: list) -> dict:
    variables = {}
    for env in environment:
        if '=' not in env:
            warning('Skip not kv env param ' + env)
        else:
            k, v = env.split('=', 1)  # split on the first '=' only, so values may contain '='
            variables[k] = v
    return variables
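A short illustration of the parsing behaviour (values are made up): entries without '=' are skipped with a warning.

__env_to_variables(['HOST=localhost', 'PORT=5432', 'malformed'])
# -> {'HOST': 'localhost', 'PORT': '5432'}   ('malformed' is logged and skipped)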
Example #7
 def __form_files(self, variables) -> Optional[list]:
     if self.files is not None:
         if isinstance(self.files, dict):
             return [self.__prepare_file(self.files, variables)]
         elif isinstance(self.files, list):
             return [self.__prepare_file(f, variables) for f in self.files]
         else:
             warning('Don\'t know how to prepare ' + str(type(self.files)))  # str() avoids TypeError on str + type
     return None
Example #8
 def publish(connection_parameters, exchange, routing_key, headers, message, disconnect_timeout):
     import pika
     from pika import exceptions
     properties = pika.BasicProperties(headers=headers)
     try:
         connection_parameters.blocked_connection_timeout = disconnect_timeout
         with pika.BlockingConnection(connection_parameters) as connection:
             channel = connection.channel()
             channel.basic_publish(exchange=exchange, routing_key=routing_key, properties=properties, body=message)
     except exceptions.ConnectionClosed:
         warning('Failed to gracefully close rabbit connection.')
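A minimal usage sketch (broker address, credentials and names are illustrative; it assumes publish is reachable as a plain function or staticmethod):

import pika

params = pika.ConnectionParameters(host='localhost', port=5672,
                                    credentials=pika.PlainCredentials('guest', 'guest'))
publish(params, exchange='test.exchange', routing_key='test.key',
        headers={'source': 'catcher'}, message='{"id": 1}', disconnect_timeout=5)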
Example #9
def __fill_dialect(url: str, driver: str):
    if '://' not in url:  # simple case - user:password@host:port/database
        return driver + '://' + url
    else:
        if '+' in url or url.startswith('postgresql'):
            return url  # dialect specified - mysql+pymysql://user:password@host:port/database
        else:  # no dialect - need to set up dialect from driver
            driver_used = url.split(':')[0]
            found = [k for k in default_ports.keys() if k.startswith(driver_used)]
            if not found:
                warning('Can\'t find dialect for driver ' + driver_used + '. Will try default ' + driver)
                found = [driver]
            return found[0] + ':' + ':'.join(url.split(':')[1:])
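Two illustrative calls that follow directly from the branches above:

__fill_dialect('user:password@host:5432/db', 'postgresql')
# -> 'postgresql://user:password@host:5432/db'   (no scheme given, the driver is prepended)
__fill_dialect('postgresql://user:password@host:5432/db', 'postgresql')
# -> returned unchanged, the dialect is already part of the url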
Example #10
def add_package_to_globals(package: str,
                           glob=None,
                           warn_missing_package=True) -> dict:
    if glob is None:
        glob = globals()
    try:
        mod = importlib.import_module(package)
        glob[package] = mod
    except ImportError as e:
        if warn_missing_package:
            warning(str(e))
        else:
            debug(str(e))
    return glob
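A minimal usage sketch: the imported module becomes addressable by name in the returned dict, and missing packages are only logged.

scope = add_package_to_globals('json', glob={})
scope['json'].dumps({'ok': True})                  # the stdlib json module was imported
add_package_to_globals('missing_pkg', glob={}, warn_missing_package=False)
# the ImportError is logged at debug level and the dict is returned unchanged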
Example #11
def prepare_modules(module_paths: list, available: dict) -> dict:
    """
    Scan all paths for external modules and form key-value dict.
    :param module_paths: list of external modules (either python packages or third-party scripts)
    :param available: dict of all registered python modules (can contain python modules from module_paths)
    :return: dict of external modules, where keys are filenames (same as stepnames) and values are the paths
    """
    indexed = {}
    for path in module_paths:
        if not os.path.exists(path) and path not in available:
            err = 'No such path: ' + path
            error(err)
        else:
            for f in os.listdir(path):
                mod_path = join(path, f)
                if f in indexed:
                    warning('Override ' + indexed[f] + ' with ' + mod_path)
                indexed[f] = mod_path
    return indexed
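A minimal usage sketch (the directory layout is hypothetical): every file found in each path is indexed by filename, so a step can later be looked up by name.

steps = prepare_modules(['my_project/steps'], available={})
# e.g. {'custom_step.py': 'my_project/steps/custom_step.py'}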
Example #12
    def _run_dag(self, oper, inventory):
        dag_id = oper['dag_id']
        config = oper['config']
        db_conf = config['db_conf']
        backend = config.get('backend', 'postgresql')
        url = config['url']
        self._prepare_dag_run(dag_id, config, inventory)
        dag_config = oper.get('dag_config', {})

        run_id = airflow_client.trigger_dag(url, dag_id, dag_config)
        sync = oper.get('sync', False)
        if not sync:
            return run_id
        else:
            wait_timeout = oper.get('wait_timeout', 5)
            try:
                execution_date = airflow_client.get_dag_run(
                    url, dag_id, run_id)['execution_date']
            except OldAirflowVersionException:
                warning(
                    "Your airflow version does not support rest API method: dag_runs. Call backend db directly"
                )
                execution_date = airflow_db_client.get_execution_date_by_run_ud(
                    run_id=run_id, conf=db_conf, dialect=backend)

            state = self._wait_for_running(url=url,
                                           dag_id=dag_id,
                                           execution_date=execution_date,
                                           timeout=wait_timeout,
                                           run_id=run_id,
                                           dialect=backend,
                                           db_conf=db_conf)
            if state != 'success':
                failed_task = airflow_db_client.get_failed_task(
                    dag_id=dag_id,
                    execution_time=execution_date,
                    conf=db_conf,
                    dialect=backend)
                raise Exception('Dag {} failed task {} with state {}'.format(
                    dag_id, failed_task, state))
            return run_id
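For reference, the oper dict consumed above needs roughly this shape (keys inferred from the code, values illustrative):

oper = {
    'dag_id': 'my_dag',
    'config': {
        'url': 'http://localhost:8080',                       # airflow webserver
        'db_conf': 'airflow:airflow@localhost:5433/airflow',  # metadata db (shape assumed)
        'backend': 'postgresql'
    },
    'dag_config': {'param': 'value'},   # optional, passed to the triggered dag run
    'sync': True,                       # wait for the dag run to finish
    'wait_timeout': 30                  # polling budget in seconds while the dag is running
}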
Example #13
 def _run_finally(self, test, result: bool):
     if test and test.final:
         logger.log_storage.test_start(test.file,
                                       test_type='{}_cleanup'.format(
                                           test.file))
         try:
             test.run_finally(result)
             info('Test ' + cut_path(self.tests_path, test.file) +
                  ' [cleanup] ' + logger.green(' passed.'))
             logger.log_storage.test_end(test.file,
                                         True,
                                         test_type='{} [cleanup]'.format(
                                             test.file))
         except Exception as e:
             warning('Test ' + cut_path(self.tests_path, test.file) +
                     ' [cleanup] ' + logger.red(' failed: ') + str(e))
             debug(traceback.format_exc())
             logger.log_storage.test_end(test.file,
                                         False,
                                         test_type='{} [cleanup]'.format(
                                             test.file))
Example #14
 def _prepare_dag_run(dag_id, config, inventory):
     db_conf = config['db_conf']
     backend = config.get('backend', 'postgresql')
     url = config['url']
     if not airflow_db_client.check_dag_exists(dag_id, db_conf, backend):
         errors = airflow_db_client.check_import_errors(
             dag_id, db_conf, backend)
         msg = 'No dag {} found.'.format(dag_id)
         if errors:
             msg = msg + ' Possible import errors: {}'.format(str(errors))
         raise Exception(msg)
     if inventory is not None and config.get('populate_connections', False):
         # fill connections from inventory to airflow
         airflow_db_client.fill_connections(inventory, db_conf, backend,
                                            config['fernet_key'])
     try:
         airflow_client.unpause_dag(url, dag_id)
     except OldAirflowVersionException:
         warning(
             "Your airflow version does not support rest API method to unpause dag. Call backend db directly"
         )
         airflow_db_client.unpause_dag(dag_id, db_conf, backend)
Example #15
 def run_tests(self, output: str = 'full') -> bool:
     """
     Run the testcase
     :param output: 'full' - all output possible. 'limited' - only test end report & summary. 'final' - only summary.
     """
     try:
         [mod.before() for mod in ModulesFactory().modules.values()]
         results = []
         for parse_result in self.parser.read_tests(self.tests_path):
             if parse_result.should_run:  # parse successful
                 variables = self.var_holder.variables  # each test has its own copy of global variables
                 variables[
                     'TEST_NAME'] = parse_result.test.file  # variables are shared between test and includes
                 with OptionalOutput(output == 'final'):
                     for include in parse_result.run_on_include:  # run all includes before the main test.
                         self._run_test(include,
                                        variables,
                                        output=output,
                                        test_type='include')
                     result = self._run_test(parse_result.test,
                                             variables,
                                             output=output)
                     results.append(result)
                     self._run_finally(parse_result.test, result)
             else:  # parse failed (dependency/parsing problem)
                 warning('Test ' +
                         cut_path(self.tests_path, parse_result.test) +
                         logger.red(' failed: ') +
                         str(parse_result.parse_error))
                 logger.log_storage.test_parse_fail(
                     parse_result.test, parse_result.parse_error)
                 results.append(False)
         return all(results)
     finally:
         logger.log_storage.write_report(join(self.path, 'reports'))
         logger.log_storage.print_summary(self.tests_path)
         [mod.after() for mod in ModulesFactory().modules.values()]
Example #16
 def run_tests(self) -> bool:
     try:
         self._compose.up()
         if self.system_vars:
             debug('Use system variables: ' +
                   str(list(self.system_vars.keys())))
             variables = self.system_vars
         else:
             variables = {}
         if self.inventory is not None:
             inv_vars = read_source_file(self.inventory)
             inv_vars['INVENTORY'] = get_filename(self.inventory)
             variables = try_get_object(
                 fill_template_str(inv_vars, variables))  # fill env vars
         variables['CURRENT_DIR'] = self.path
         variables[
             'RESOURCES_DIR'] = self.resources or self.path + '/resources'
         test_files = get_files(self.tests_path)
         results = []
         for file in test_files:
             self.all_includes = []
             try:
                 variables['TEST_NAME'] = file
                 test = self.prepare_test(file, variables)
                 logger.log_storage.test_start(file)
                 test.run()
                 results.append(True)
                 info('Test ' + file + ' passed.')
                 logger.log_storage.test_end(file, True)
             except Exception as e:
                 warning('Test ' + file + ' failed: ' + str(e))
                 results.append(False)
                 logger.log_storage.test_end(file, False, str(e))
         return all(results)
     finally:
         logger.log_storage.write_report(self.path)
         self._compose.down()