Example #1
    def _call_function(self):

        retries = self._max_retries
        if not retries:
            retries = 1

        exp = None
        result = None
        traceback_text = None
        for retry_index in range(retries):
            exp = None
            traceback_text = None
            try:
                logger.info(
                    'calling shared function: {} - retry index: {}'.format(
                        self._function_key, retry_index))
                result = self._function(*self._function_args,
                                        **self._function_kwargs)
                break
            except Exception as err:
                exp = err
                result = None
                logger.exception(exp)
                _, _, traceback_ = sys.exc_info()
                traceback_text = ''.join(traceback.format_tb(traceback_))

        return result, exp, traceback_text
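
The helper swallows exceptions and reports the outcome as a (result, exception, traceback text) triple instead of raising, leaving the failure policy to the caller. A minimal consumption sketch; the `shared` wrapper object and the re-raise policy are assumptions, not part of the snippet:

# Hedged sketch: consuming the triple returned by _call_function.
result, exp, traceback_text = shared._call_function()
if exp is not None:
    # all retries failed; log the captured traceback and re-raise the last error
    logger.error('shared function failed:\n%s', traceback_text)
    raise exp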
Example #2
def test_negative_backup_incremental_nodir(sat_maintain, setup_backup_tests,
                                           backup_type):
    """Try to take an incremental backup providing non-existing path to the previous backup
    (expected after --incremental option)

    :id: 4efec2fb-810b-4636-ae26-422a6bcb43cc

    :steps:
        1. try to create an incremental backup with a non-existing path provided

    :expectedresult:
        1. should fail with appropriate error message
    """
    subdir = f'{BACKUP_DIR}backup-{gen_string("alpha")}'
    result = sat_maintain.cli.Backup.run_backup(
        backup_dir='',
        backup_type=backup_type,
        options={
            'assumeyes': True,
            'plaintext': True,
            'incremental': subdir
        },
    )
    logger.info(result.stdout)
    assert result.status != 0
    assert NOPREV_MSG in str(result.stderr)
Example #3
        def function_wrapper(*args, **kwargs):
            function_name = _get_function_name(func, class_name=class_name)
            lock_file_path = _get_function_name_lock_path(
                function_name,
                scope=scope,
                scope_kwargs=scope_kwargs,
                scope_context=scope_context)
            process_id = str(os.getpid())
            # to prevent dead lock when recursively calling this function
            # check if the same process is trying to acquire the lock
            _check_deadlock(lock_file_path, process_id)

            with file_lock(lock_file_path, remove=False,
                           timeout=timeout) as handler:
                logger.info(
                    'process id: {} lock function using file path: {}'.format(
                        process_id, lock_file_path))
                # write the process id that locked this function
                _write_content(handler, process_id)
                # call the locked function
                try:
                    res = func(*args, **kwargs)
                finally:
                    # clear the file
                    _write_content(handler, None)

            return res
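
This wrapper is the inner closure of a locking decorator. A structural sketch of how such a decorator might produce it and flag the function as lockable; the decorator name, signature defaults, and flag wiring here are assumptions, not the project's actual code:

import functools

def lock_function(scope=None, scope_kwargs=None, scope_context=None,
                  timeout=600, class_name=None):  # the 600 s default is an assumption
    def decorator(func):
        @functools.wraps(func)
        def function_wrapper(*args, **kwargs):
            # ... acquire the file lock and call func, as in the snippet above ...
            return func(*args, **kwargs)
        # flags later checked by locking_function (see Example #12)
        function_wrapper.__function_locked__ = True
        function_wrapper.__class_name__ = class_name
        return function_wrapper
    return decorator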
Example #4
def test_positive_synchronize_custom_product_weekly_recurrence(module_org):
    """Create a weekly sync plan with a past datetime as a sync date,
    add a custom product and verify the product gets synchronized on
    the next sync occurrence

    :id: ef52dd8e-756e-429c-8c30-b3e7db2b6d61

    :expectedresults: Product is synchronized successfully.

    :BZ: 1396647

    :CaseLevel: System
    """
    delay = 2 * 60
    product = entities.Product(organization=module_org).create()
    repo = entities.Repository(product=product).create()
    start_date = datetime.utcnow().replace(second=0) - timedelta(weeks=1) + timedelta(seconds=delay)
    # Create and Associate sync plan with product
    sync_plan = entities.SyncPlan(
        organization=module_org, enabled=True, interval='weekly', sync_date=start_date
    ).create()
    sync_plan.add_products(data={'product_ids': [product.id]})
    # Wait quarter of expected time
    logger.info(f'Waiting {delay / 4} seconds to check product {product.name} was not synced')
    sleep(delay / 4)
    # Verify product is not synced and doesn't have any content
    with pytest.raises(AssertionError):
        validate_task_status(repo.id, max_tries=1)
    validate_repo_content(repo, ['erratum', 'package', 'package_group'], after_sync=False)
    # Wait the rest of expected time
    logger.info(f'Waiting {delay * 3 / 4} seconds to check product {product.name} was synced')
    sleep(delay * 3 / 4)
    # Verify product was synced successfully
    validate_task_status(repo.id, repo_backend_id=repo.backend_identifier)
    validate_repo_content(repo, ['erratum', 'package', 'package_group'])
Example #5
def test_positive_synchronize_rh_product_past_sync_date():
    """Create a sync plan with past datetime as a sync date, add a
    RH product and verify the product gets synchronized on the next sync
    occurrence

    :id: 080c316d-4a06-4ee9-b5f6-1b210d8d0593

    :expectedresults: Product is synchronized successfully.

    :BZ: 1279539

    :CaseLevel: System
    """
    interval = 60 * 60  # 'hourly' sync interval in seconds
    delay = 2 * 60
    org = entities.Organization().create()
    with manifests.clone() as manifest:
        entities.Subscription().upload(
            data={'organization_id': org.id}, files={'content': manifest.content}
        )
    repo_id = enable_rhrepo_and_fetchid(
        basearch='x86_64',
        org_id=org.id,
        product=PRDS['rhel'],
        repo=REPOS['rhst7']['name'],
        reposet=REPOSET['rhst7'],
        releasever=None,
    )
    product = entities.Product(name=PRDS['rhel'], organization=org).search()[0]
    repo = entities.Repository(id=repo_id).read()
    sync_plan = entities.SyncPlan(
        organization=org,
        enabled=True,
        interval='hourly',
        sync_date=datetime.utcnow() - timedelta(seconds=interval - delay),
    ).create()
    # Associate sync plan with product
    sync_plan.add_products(data={'product_ids': [product.id]})
    # Wait quarter of expected time
    logger.info(
        f"Waiting {(delay / 4)} seconds to check product {product.name}"
        f" was not synced by {sync_plan.name}"
    )
    sleep(delay / 4)
    # Verify product has not been synced yet
    with pytest.raises(AssertionError):
        validate_task_status(repo.id, org.id, max_tries=1)
    validate_repo_content(repo, ['erratum', 'package', 'package_group'], after_sync=False)
    # Wait until the next recurrence
    logger.info(
        f"Waiting {(delay * 3 / 4)} seconds to check product {product.name}"
        f" was synced by {sync_plan.name}"
    )
    sleep(delay * 3 / 4)
    # Verify product was synced successfully
    validate_task_status(repo.id, org.id)
    validate_repo_content(repo, ['erratum', 'package', 'package_group'])
Example #6
def test_positive_synchronize_custom_products_future_sync_date(module_org):
    """Create a sync plan with sync date in a future and sync multiple
    custom products with multiple repos automatically.

    :id: dd262cf3-b836-422c-baca-b3adbc532478

    :expectedresults: Products are synchronized successfully.

    :CaseLevel: System

    :BZ: 1655595
    """
    delay = 2 * 60  # delay for sync date in seconds
    products = [
        make_product({'organization-id': module_org.id}) for _ in range(3)
    ]
    repos = [
        make_repository({'product-id': product['id']}) for product in products
        for _ in range(2)
    ]
    sync_plan = make_sync_plan({
        'enabled': 'true',
        'organization-id': module_org.id,
        'sync-date': (datetime.utcnow().replace(second=0)
                      + timedelta(seconds=delay)).strftime(SYNC_DATE_FMT),
        'cron-expression': ['*/4 * * * *'],
    })
    # Verify products have not been synced yet
    for repo in repos:
        with pytest.raises(AssertionError):
            validate_task_status(repo['id'], max_tries=1)
    # Associate sync plan with products
    for product in products:
        Product.set_sync_plan({
            'id': product['id'],
            'sync-plan-id': sync_plan['id']
        })
    # Wait quarter of expected time
    logger.info('Waiting {} seconds to check products were not synced'.format(
        delay / 4))
    sleep(delay / 4)
    # Verify products have not been synced yet
    for repo in repos:
        with pytest.raises(AssertionError):
            validate_task_status(repo['id'], max_tries=1)
    # Wait the rest of expected time
    logger.info('Waiting {} seconds to check products were synced'.format(
        delay * 3 / 4))
    sleep(delay * 3 / 4)
    # Verify product was synced successfully
    for repo in repos:
        validate_task_status(repo['id'], repo_name=repo['name'])
        validate_repo_content(repo, ['errata', 'package-groups', 'packages'])
Example #7
def test_positive_synchronize_custom_product_future_sync_date(module_org):
    """Create a sync plan with sync date in a future and sync one custom
    product with it automatically.

    :id: 635bffe2-df98-4971-8950-40edc89e479e

    :expectedresults: Product is synchronized successfully.

    :CaseLevel: System

    :BZ: 1655595
    """
    cron_multiple = 5  # sync event is on every multiple of this value, starting from 00 mins
    delay = (cron_multiple) * 60  # delay for sync date in seconds
    guardtime = 180  # do not start test less than 3 mins before the next sync event
    product = make_product({'organization-id': module_org.id})
    repo = make_repository({'product-id': product['id']})
    # if < 3 mins before the target event rather wait 3 mins for the next test window
    if int(datetime.utcnow().strftime('%M')) % (cron_multiple) > int(
            guardtime / 60):
        sleep(guardtime)
    sync_plan = make_sync_plan({
        'enabled': 'true',
        'organization-id': module_org.id,
        'sync-date': (datetime.utcnow().replace(second=0)
                      + timedelta(seconds=delay)).strftime(SYNC_DATE_FMT),
        'cron-expression': [f'*/{cron_multiple} * * * *'],
    })
    # Verify product is not synced and doesn't have any content
    validate_repo_content(repo, ['errata', 'packages'], after_sync=False)
    # Associate sync plan with product
    Product.set_sync_plan({
        'id': product['id'],
        'sync-plan-id': sync_plan['id']
    })
    # Wait quarter of expected time
    logger.info(
        f"Waiting {(delay / 4)} seconds to check product {product['name']}"
        f" was not synced by {sync_plan['name']}")
    sleep(delay / 4)
    # Verify product has not been synced yet
    with pytest.raises(AssertionError):
        validate_task_status(repo['id'], module_org.id, max_tries=1)
    validate_repo_content(repo, ['errata', 'packages'], after_sync=False)
    # Wait the rest of expected time
    logger.info(
        f"Waiting {(delay * 3 / 4)} seconds to check product {product['name']}"
        f" was synced by {sync_plan['name']}")
    sleep(delay * 3 / 4)
    # Verify product was synced successfully
    validate_task_status(repo['id'], module_org.id)
    validate_repo_content(repo, ['errata', 'package-groups', 'packages'])
Example #8
def pytest_terminal_summary(terminalreporter, exitstatus):
    """Add a section to terminal summary reporting."""
    if PRE_UPGRADE:
        # Save the failed, errored and skipped tests to file
        failed_test_reports = []
        for key in ['failed', 'error', 'skipped']:
            failed_test_reports.extend(terminalreporter.stats.get(key, []))
        failed_test_node_ids = [test_report.nodeid for test_report in failed_test_reports]
        logger.info('Save failed tests to file %s', PRE_UPGRADE_TESTS_FILE_PATH)
        with open(PRE_UPGRADE_TESTS_FILE_PATH, 'w') as json_file:
            json.dump(failed_test_node_ids, json_file)
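
The saved file is presumably read back by a post-upgrade session so that only tests that did not pass before the upgrade get re-run. A hedged sketch of that consumer; the helper name and its use are assumptions, while PRE_UPGRADE_TESTS_FILE_PATH comes from the snippet above:

import json

def load_pre_upgrade_results(path=PRE_UPGRADE_TESTS_FILE_PATH):
    """Return the node IDs of tests that failed, errored or were skipped pre-upgrade."""
    with open(path) as json_file:
        return set(json.load(json_file))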
Example #9
def test_positive_backup_split_pulp_tar(sat_maintain, setup_backup_tests,
                                        module_synced_repos, backup_type):
    """Take a backup, ensure that '--split-pulp-tar' option works

    :id: ddc3609d-642f-4161-b7a1-54f4aa069c08

    :setup:
        1. repo with sufficient size synced to the server

    :steps:
        1. create a backup using the split option
        2. check that appropriate files are created
        3. check that pulp_data.tar fits the split size

    :expectedresult:
        1. backup succeeds
        2. expected files are present in the backup
        3. size of the pulp_data.tar is smaller than the provided value
    """
    subdir = f'{BACKUP_DIR}backup-{gen_string("alpha")}'
    set_size = 100
    result = sat_maintain.cli.Backup.run_backup(
        backup_dir=subdir,
        backup_type=backup_type,
        options={
            'assumeyes': True,
            'plaintext': True,
            'split-pulp-tar': f'{set_size}k'
        },
    )
    logger.info(result.stdout)
    assert result.status == 0
    assert 'FAIL' not in result.stdout

    # Check for expected files
    backup_dir = re.findall(fr'{subdir}\/satellite-backup-.*-[0-5][0-9]',
                            result.stdout)[0]
    files = sat_maintain.execute(f'ls -a {backup_dir}').stdout.split('\n')
    files = [i for i in files if not re.compile(r'^\.*$').search(i)]

    if sat_maintain.is_remote_db():
        expected_files = BASIC_FILES | REMOTE_SAT_FILES
    else:
        expected_files = (BASIC_FILES | OFFLINE_FILES
                          if backup_type == 'offline'
                          else BASIC_FILES | ONLINE_SAT_FILES)
    assert set(files).issuperset(expected_files | CONTENT_FILES), assert_msg

    # Check the split works
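    # GNU du defaults to 1 KiB blocks, matching the 'k' unit passed to split-pulp-tar above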
    result = sat_maintain.execute(f'du {backup_dir}/pulp_data.tar')
    pulp_tar_size = int(result.stdout.split('\t')[0])
    assert pulp_tar_size <= set_size
Example #10
    def test_negative_get_unauthorized(self, entity_cls):
        """GET an entity-dependent path without credentials.

        :id: 49127c71-55a2-42d1-b418-59229e9bad00

        :parametrized: yes

        :expectedresults: HTTP 401 is returned

        :CaseImportance: Critical
        """
        logger.info('test_get_unauthorized arg: %s', entity_cls)
        response = client.get(entity_cls().path(), auth=(), verify=False)
        assert http.client.UNAUTHORIZED == response.status_code
Example #11
def test_positive_synchronize_custom_product_past_sync_date(module_org):
    """Create a sync plan with a past datetime as a sync date, add a
    custom product and verify the product gets synchronized on the next
    sync occurrence

    :id: 21efdd08-698c-443c-a681-edce19a4c83a

    :expectedresults: Product is synchronized successfully.

    :BZ: 1279539

    :CaseLevel: System
    """
    interval = 60 * 60  # 'hourly' sync interval in seconds
    delay = 2 * 60
    product = make_product({'organization-id': module_org.id})
    repo = make_repository({'product-id': product['id']})
    sync_plan = make_sync_plan({
        'enabled': 'true',
        'interval': 'hourly',
        'organization-id': module_org.id,
        'sync-date': (datetime.utcnow()
                      - timedelta(seconds=interval - delay)).strftime(SYNC_DATE_FMT),
    })
    # Associate sync plan with product
    Product.set_sync_plan({
        'id': product['id'],
        'sync-plan-id': sync_plan['id']
    })
    # Wait quarter of expected time
    logger.info(
        f"Waiting {(delay / 4)} seconds to check product {product['name']}"
        f" was not synced by {sync_plan['name']}")
    sleep(delay / 4)
    # Verify product has not been synced yet
    with pytest.raises(AssertionError):
        validate_task_status(repo['id'], module_org.id, max_tries=1)
    validate_repo_content(repo, ['errata', 'packages'], after_sync=False)
    # Wait until the first recurrence
    logger.info(
        f"Waiting {(delay * 3 / 4)} seconds to check product {product['name']}"
        f" was synced by {sync_plan['name']}")
    sleep(delay * 3 / 4)
    # Verify product was synced successfully
    validate_task_status(repo['id'], module_org.id)
    validate_repo_content(repo, ['errata', 'package-groups', 'packages'])
Example #12
def locking_function(
    function,
    scope=_get_default_scope,
    scope_context=None,
    scope_kwargs=None,
    timeout=LOCK_DEFAULT_TIMEOUT,
):
    """Lock a function in combination with a scope and scope_context.
    Any parallel pytest xdist worker will wait for this function to finish.

    :type function: callable
    :type scope: str or callable
    :type scope_kwargs: dict
    :type scope_context: str
    :type timeout: int

    :param function: the function that is intended to be locked
    :param scope: this parameter will define the namespace of locking
    :param scope_context: an optional context string that, together with the
           scope and the function name, identifies a concrete lock
    :param scope_kwargs: kwargs to be passed to scope if it is a callable
    :param timeout: the time in seconds to wait for acquiring the lock
    """
    if not getattr(function, '__function_locked__', False):
        raise FunctionLockerError(
            'Cannot ensure locking when using a non-locked function')
    class_name = getattr(function, '__class_name__', None)
    function_name = _get_function_name(function, class_name=class_name)
    lock_file_path = _get_function_name_lock_path(function_name,
                                                  scope=scope,
                                                  scope_kwargs=scope_kwargs,
                                                  scope_context=scope_context)
    process_id = str(os.getpid())
    # to prevent dead lock when recursively calling this function
    # check if the same process is trying to acquire the lock
    _check_deadlock(lock_file_path, process_id)

    with file_lock(lock_file_path, remove=False, timeout=timeout) as handler:
        logger.info(
            'process id: {} - lock function name: {} - using file path: {}'.format(
                process_id, function_name, lock_file_path))
        # write the process id that locked this function
        _write_content(handler, process_id)
        # let the locked code run
        try:
            yield handler
        finally:
            # clear the file
            _write_content(handler, None)
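
Since the function body yields, it is evidently wrapped with contextlib.contextmanager where it is defined. A hedged usage sketch; some_locked_function stands for any function already decorated as lockable, and the scope values are illustrative:

with locking_function(some_locked_function, scope='module',
                      scope_context='sync_plan') as handler:
    # critical section: parallel xdist workers block here until the lock is released
    some_locked_function()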
Example #13
def pytest_collection_modifyitems(items, config):
    """
    Collects and modifies tests collection based on pytest options to select tests marked as
    failed/skipped and user specific tests in Report Portal
    """
    fail_args = config.getoption('only_failed', False)
    skip_arg = config.getoption('only_skipped', False)
    user_arg = config.getoption('user', False)
    upgrades_rerun = config.getoption('upgrades_rerun', False)
    if not any([fail_args, skip_arg, user_arg, upgrades_rerun]):
        return
    rp = ReportPortal()
    version = settings.server.version
    sat_version = f'{version.base_version}.{version.epoch}'
    logger.info(
        f'Fetching Report Portal launches for target Satellite version: {sat_version}'
    )
    launch_type = 'upgrades' if upgrades_rerun else 'satellite6'
    launch = next(iter(rp.launches(sat_version=sat_version, launch_type=launch_type).values()))
    _validate_launch(launch, sat_version)
    test_args = {}
    test_args.setdefault('status', list())
    if fail_args:
        test_args['status'].append('failed')
        if fail_args != 'all':
            # str.split(',') already yields [fail_args] when there is no comma
            defect_types = fail_args.split(',')
            allowed_args = [*rp.defect_types.keys()]
            if not set(defect_types).issubset(set(allowed_args)):
                raise pytest.UsageError(
                    'Incorrect values to pytest option \'--only-failed\' are provided as '
                    f'\'{fail_args}\'. It should be none/one/mix of {allowed_args}'
                )
            test_args['defect_types'] = defect_types
    if skip_arg:
        test_args['status'].append('skipped')
    if user_arg:
        test_args['user'] = user_arg
    rp_tests = _get_tests(launch, **test_args)
    selected, deselected = _get_test_collection(rp_tests, items)
    logger.debug(
        f'Selected {len(selected)} and deselected {len(deselected)} tests based on latest '
        'launch test results.')
    config.hook.pytest_deselected(items=deselected)
    items[:] = selected
Example #14
def test_positive_logging_from_candlepin(module_org, default_sat):
    """Check logging after manifest upload.

    :id: 8c06e501-52d7-4baf-903e-7de9caffb066

    :expectedresults: line of logs with POST has request ID

    :CaseImportance: Medium
    """

    POST_line_found = False
    source_log = '/var/log/candlepin/candlepin.log'
    test_logfile = '/var/tmp/logfile_from_candlepin'
    # regex for a version 4 UUID (8-4-4-12 format)
    regex = r"\b[0-9a-f]{8}\b-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-\b[0-9a-f]{12}\b"
    # get the number of lines in the source log before the test
    line_count_start = line_count(source_log, default_sat)
    # command for this test
    with manifests.clone() as manifest:
        with NamedTemporaryFile(dir=robottelo_tmp_dir) as content_file:
            content_file.write(manifest.content.read())
            content_file.seek(0)
            default_sat.put(local_path=content_file.name,
                            remote_path=manifest.filename)
        Subscription.upload({
            'file': manifest.filename,
            'organization-id': module_org.id
        })
    # get the number of lines in the source log after the test
    line_count_end = line_count(source_log, default_sat)
    # get the log lines of interest, put them in test_logfile
    cut_lines(line_count_start, line_count_end, source_log, test_logfile,
              default_sat)
    # use same location on remote and local for log file extract
    default_sat.get(remote_path=test_logfile)
    # search the log file extract for the line with POST to candlepin API
    with open(test_logfile) as logfile:
        for line in logfile:
            # the org name belongs in the pattern, not formatted into the log line
            if re.search(fr'verb=POST, uri=/candlepin/owners/{re.escape(module_org.name)}', line):
                logger.info('Found the line with POST to candlepin API')
                POST_line_found = True
                # Confirm the request ID was logged in the line with POST
                match = re.search(regex, line)
                assert match, "Request ID not found"
                logger.info("Request ID found for logging from candlepin")
                break
    assert POST_line_found, "The POST command to candlepin was not found in logs."
Example #15
def test_positive_synchronize_custom_product_weekly_recurrence(module_org):
    """Create a weekly sync plan with a past datetime as a sync date,
    add a custom product and verify the product gets synchronized on
    the next sync occurrence

    :id: 1079a66d-7c23-44f6-a4a0-47f4c74d92a4

    :expectedresults: Product is synchronized successfully.

    :BZ: 1396647

    :CaseLevel: System
    """
    delay = 2 * 60
    product = make_product({'organization-id': module_org.id})
    repo = make_repository({'product-id': product['id']})
    start_date = datetime.utcnow() - timedelta(weeks=1) + timedelta(seconds=delay)
    sync_plan = make_sync_plan({
        'enabled': 'true',
        'interval': 'weekly',
        'organization-id': module_org.id,
        'sync-date': start_date.strftime("%Y-%m-%d %H:%M:%S"),
    })
    # Associate sync plan with product
    Product.set_sync_plan({
        'id': product['id'],
        'sync-plan-id': sync_plan['id']
    })
    # Wait quarter of expected time
    logger.info('Waiting {} seconds to check product {}'
                ' was not synced'.format(delay / 4, product['name']))
    sleep(delay / 4)
    # Verify product has not been synced yet
    with pytest.raises(AssertionError):
        validate_task_status(repo['id'], max_tries=1)
    validate_repo_content(repo, ['errata', 'packages'], after_sync=False)
    # Wait until the first recurrence
    logger.info('Waiting {} seconds to check product {}'
                ' was synced'.format((delay * 3 / 4), product['name']))
    sleep(delay * 3 / 4)
    # Verify product was synced successfully
    validate_task_status(repo['id'], repo_name=repo['name'])
    validate_repo_content(repo, ['errata', 'package-groups', 'packages'])
Example #16
def test_positive_synchronize_custom_product_future_sync_date(module_org):
    """Create a sync plan with sync date in a future and sync one custom
    product with it automatically.

    :id: 635bffe2-df98-4971-8950-40edc89e479e

    :expectedresults: Product is synchronized successfully.

    :CaseLevel: System

    :BZ: 1655595
    """
    delay = 2 * 60  # delay for sync date in seconds
    product = make_product({'organization-id': module_org.id})
    repo = make_repository({'product-id': product['id']})
    sync_plan = make_sync_plan({
        'enabled': 'true',
        'organization-id': module_org.id,
        'sync-date': (datetime.utcnow().replace(second=0)
                      + timedelta(seconds=delay)).strftime(SYNC_DATE_FMT),
        'cron-expression': ['*/4 * * * *'],
    })
    # Verify product is not synced and doesn't have any content
    validate_repo_content(repo, ['errata', 'packages'], after_sync=False)
    # Associate sync plan with product
    Product.set_sync_plan({
        'id': product['id'],
        'sync-plan-id': sync_plan['id']
    })
    # Wait quarter of expected time
    logger.info(
        f"Waiting {(delay / 4)} seconds to check product {product['name']}"
        f" was not synced by {sync_plan['name']}")
    sleep(delay / 4)
    # Verify product has not been synced yet
    with pytest.raises(AssertionError):
        validate_task_status(repo['id'], module_org.id, max_tries=1)
    validate_repo_content(repo, ['errata', 'packages'], after_sync=False)
    # Wait the rest of expected time
    logger.info(
        f"Waiting {(delay * 3 / 4)} seconds to check product {product['name']}"
        f" was synced by {sync_plan['name']}")
    sleep(delay * 3 / 4)
    # Verify product was synced successfully
    validate_task_status(repo['id'], module_org.id)
    validate_repo_content(repo, ['errata', 'package-groups', 'packages'])
Example #17
def test_positive_backup_all(sat_maintain, setup_backup_tests,
                             module_synced_repos, backup_type):
    """Take a backup with all options provided

    :id: bbaf251f-7764-4b7d-b79b-f5f48f5d3b9e

    :steps:
        1. create an initial backup (to serve as a base for the incremental backup)
        2. create another backup with all options provided

    :expectedresult:
        1. both backups succeed
    """
    subdir = f'{BACKUP_DIR}backup-{gen_string("alpha")}'
    sat_maintain.execute(f'mkdir -m 0777 {subdir}')
    result = sat_maintain.cli.Backup.run_backup(
        backup_dir=subdir,
        backup_type=backup_type,
        options={
            'assumeyes': True,
            'plaintext': True
        },
    )
    logger.info(result.stdout)
    assert result.status == 0
    assert 'FAIL' not in result.stdout

    init_backup_dir = re.findall(fr'{subdir}\/satellite-backup-.*-[0-5][0-9]',
                                 result.stdout)[0]

    result = sat_maintain.cli.Backup.run_backup(
        backup_dir=subdir,
        backup_type=backup_type,
        options={
            'assumeyes': True,
            'plaintext': True,
            'force': True,
            'skip-pulp-content': True,
            'preserve-directory': True,
            'split-pulp-tar': '10M',
            'incremental': init_backup_dir,
            'features': 'dns,tftp,dhcp,openscap',
        },
    )
    logger.info(result.stdout)
    assert result.status == 0
    assert 'FAIL' not in result.stdout
Example #18
def align_to_satellite(worker_id, satellite_factory):
    """Attempt to align a Satellite to the current xdist worker"""
    cache_proxy = robottelo.config.settings_proxy._cache
    # clear any hostname that may have been previously set
    cache_proxy['server.hostname'] = on_demand_sat = None

    if worker_id in ['master', 'local']:
        worker_pos = 0
    else:
        worker_pos = int(worker_id.replace('gw', ''))

    # attempt to add potential satellites from the broker inventory file
    if settings.server.inventory_filter:
        settings.server.hostnames  # need to prime the cache
        hosts = VMBroker().from_inventory(
            filter=settings.server.inventory_filter)
        # update the cache_proxy for server.hostnames in case it's empty
        cache_proxy['server.hostnames'] = cache_proxy['server.hostnames'] or []
        cache_proxy['server.hostnames'].extend(
            [host.hostname for host in hosts])

    # attempt to align a worker to a satellite
    if settings.server.xdist_behavior == 'run-on-one' and settings.server.hostnames:
        cache_proxy['server.hostname'] = settings.server.hostnames[0]
    elif settings.server.hostnames and worker_pos < len(
            settings.server.hostnames):
        cache_proxy['server.hostname'] = settings.server.hostnames[worker_pos]
    elif settings.server.xdist_behavior == 'balance' and settings.server.hostnames:
        cache_proxy['server.hostname'] = random.choice(
            settings.server.hostnames)
    # get current satellite information
    elif settings.server.xdist_behavior == "on-demand":
        on_demand_sat = satellite_factory()
        if on_demand_sat.hostname:
            cache_proxy['server.hostname'] = on_demand_sat.hostname
        # if no satellite was received, fallback to balance
        if not settings.server.hostname:
            cache_proxy['server.hostname'] = random.choice(
                settings.server.hostnames)
    logger.info(f"xdist worker {worker_id} "
                f"was assigned hostname {settings.server.hostname}")
    settings.configure_airgun()
    settings.configure_nailgun()
    yield
    if on_demand_sat and settings.server.auto_checkin:
        VMBroker(hosts=[on_demand_sat]).checkin()
Example #19
def test_positive_synchronize_custom_products_future_sync_date(module_org):
    """Create a sync plan with sync date in a future and sync multiple
    custom products with multiple repos automatically.

    :id: e646196e-3951-4297-8c3c-1494d9895347

    :expectedresults: Products are synchronized successfully.

    :CaseLevel: System

    :BZ: 1695733
    """
    delay = 4 * 60  # delay for sync date in seconds
    products = [
        entities.Product(organization=module_org).create() for _ in range(2)
    ]
    repos = [
        entities.Repository(product=product).create() for product in products
        for _ in range(2)
    ]
    # Verify products have not been synced yet
    logger.info(
        f"Check products {products[0].name} and {products[1].name}"
        f" were not synced before the sync plan was created in org {module_org.label}")
    for repo in repos:
        with pytest.raises(AssertionError):
            validate_task_status(repo.id, module_org.id, max_tries=1)
    # Create and Associate sync plan with products
    # BZ:1695733 is closed WONTFIX so apply this workaround
    logger.info('Setting seconds to zero as a workaround for BZ#1695733')
    sync_date = datetime.utcnow().replace(second=0) + timedelta(seconds=delay)
    sync_plan = entities.SyncPlan(organization=module_org,
                                  enabled=True,
                                  sync_date=sync_date).create()
    sync_plan.add_products(
        data={'product_ids': [product.id for product in products]})
    # Wait quarter of expected time
    logger.info(
        f"Waiting {(delay / 4)} seconds to check products {products[0].name} and {products[1].name}"
        f" were not synced by {sync_plan.name}")
    sleep(delay / 4)
    # Verify products have not been synced yet
    for repo in repos:
        with pytest.raises(AssertionError):
            validate_task_status(repo.id, module_org.id, max_tries=1)
    # Wait the rest of expected time
    logger.info(
        f"Waiting {(delay * 3 / 4)} seconds to check products {products[0].name}"
        f" and {products[1].name} were synced by {sync_plan.name}")
    sleep(delay * 3 / 4)
    # Verify product was synced successfully
    for repo in repos:
        validate_task_status(repo.id, module_org.id)
        validate_repo_content(repo, ['erratum', 'package', 'package_group'])
Example #20
def test_positive_synchronize_custom_product_past_sync_date(module_org):
    """Create a sync plan with past datetime as a sync date, add a
    custom product and verify the product gets synchronized on the next
    sync occurrence

    :id: 0495cb39-2f15-4b6e-9828-1e9517c5c826

    :expectedresults: Product is synchronized successfully.

    :BZ: 1279539

    :CaseLevel: System
    """
    interval = 60 * 60  # 'hourly' sync interval in seconds
    delay = 2 * 60
    product = entities.Product(organization=module_org).create()
    repo = entities.Repository(product=product).create()
    # Create and Associate sync plan with product
    sync_plan = entities.SyncPlan(
        organization=module_org,
        enabled=True,
        interval='hourly',
        sync_date=datetime.utcnow().replace(second=0) - timedelta(seconds=interval - delay),
    ).create()
    sync_plan.add_products(data={'product_ids': [product.id]})
    # Wait quarter of expected time
    logger.info(
        f"Waiting {(delay / 4)} seconds to check product {product.name}"
        f" was not synced by {sync_plan.name}"
    )
    sleep(delay / 4)
    # Verify product is not synced and doesn't have any content
    with pytest.raises(AssertionError):
        validate_task_status(repo.id, module_org.id, max_tries=1)
    validate_repo_content(repo, ['erratum', 'package', 'package_group'], after_sync=False)
    # Wait until the next recurrence
    logger.info(
        f"Waiting {(delay * 3 / 4)} seconds to check product {product.name}"
        f" was synced by {sync_plan.name}"
    )
    sleep(delay * 3 / 4)
    # Verify product was synced successfully
    validate_task_status(repo.id, module_org.id)
    validate_repo_content(repo, ['erratum', 'package', 'package_group'])
Example #21
    def test_positive_get_status_code(self, entity_cls):
        """GET an entity-dependent path.

        :id: 89e4fafe-7780-4be4-acc1-90f7c02a8530

        :parametrized: yes

        :expectedresults: HTTP 200 is returned with an ``application/json``
            content-type

        :CaseImportance: Critical
        """
        logger.info('test_get_status_code arg: %s', entity_cls)
        response = client.get(entity_cls().path(),
                              auth=get_credentials(),
                              verify=False)
        response.raise_for_status()
        assert http.client.OK == response.status_code
        assert 'application/json' in response.headers['content-type']
Example #22
def get_connection(
    hostname=None,
    username=None,
    password=None,
    key_filename=None,
    key_string=None,
    timeout=None,
    port=22,
):
    """Yield an ssh connection object.

    The connection will be configured with the specified arguments or will
    fall-back to server configuration in the configuration file.

    Yield this SSH connection. The connection is automatically closed via
    ``contextlib`` when the caller is done with it, so clients should use the
    ``with`` statement to handle the object::

        with get_connection() as connection:
            ...

    All arguments are passed through to ``get_client``.

    :return: An SSH connection.
    :rtype: ``paramiko.SSHClient``

    """
    client = get_client(
        hostname=hostname,
        username=username,
        password=password,
        key_filename=key_filename,
        key_string=key_string,
        timeout=timeout,
        port=port,
    )
    try:
        logger.debug(f'Instantiated Paramiko client {client._id}')
        logger.info('Connected to [%s]', hostname)
        yield client
    finally:
        client.close()
        logger.debug(f'Destroyed Paramiko client {client._id}')
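
Like locking_function above, this generator is presumably decorated with contextlib.contextmanager in the real module. A usage sketch building on the .run() call visible in Example #29; the hostname is hypothetical:

with get_connection(hostname='sat.example.com') as connection:
    # robottelo's wrapped client exposes run() and return_code, as in Example #29
    result = connection.run('hammer --version')
    assert result.return_code == 0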
Example #23
def test_positive_backup_preserve_directory(sat_maintain, setup_backup_tests,
                                            module_synced_repos, backup_type):
    """Take a backup, ensure that '--preserve-directory' option works

    :id: e77dae38-d269-495d-9f48-e9572d2bb5c3

    :steps:
        1. create a backup dir, change owner to postgres
        2. create a backup
        3. check that appropriate files are created in the provided dir

    :expectedresult:
        1. backup succeeds
        2. expected files are stored in the provided dir
    """
    subdir = f'{BACKUP_DIR}backup-{gen_string("alpha")}'
    sat_maintain.execute(f'mkdir {subdir} && chown postgres:postgres {subdir}')

    result = sat_maintain.cli.Backup.run_backup(
        backup_dir=subdir,
        backup_type=backup_type,
        options={
            'assumeyes': True,
            'plaintext': True,
            'preserve-directory': True
        },
    )
    logger.info(result.stdout)
    assert result.status == 0
    assert 'FAIL' not in result.stdout

    # Check for expected files
    files = sat_maintain.execute(f'ls -a {subdir}').stdout.split('\n')
    files = [i for i in files if not re.compile(r'^\.*$').search(i)]

    if sat_maintain.is_remote_db():
        expected_files = BASIC_FILES | REMOTE_SAT_FILES
    else:
        expected_files = (BASIC_FILES | OFFLINE_FILES
                          if backup_type == 'offline'
                          else BASIC_FILES | ONLINE_SAT_FILES)
    assert set(files).issuperset(expected_files | CONTENT_FILES), assert_msg
Example #24
def test_positive_backup_caspule_features(sat_maintain, setup_backup_tests,
                                          module_synced_repos, backup_type):
    """Take a backup with capsule features as dns, tftp, dhcp, openscap

    :id: 7ebe8fe3-e5c3-454e-8e20-fad0a9d5b464

    :steps:
        1. create a backup
        2. check that appropriate files are created

    :expectedresult:
        1. backup succeeds
        2. expected files are present in the backup
    """
    subdir = f'{BACKUP_DIR}backup-{gen_string("alpha")}'
    features = 'dns,tftp,dhcp,openscap'
    result = sat_maintain.cli.Backup.run_backup(
        backup_dir=subdir,
        backup_type=backup_type,
        options={
            'assumeyes': True,
            'plaintext': True,
            'features': features
        },
    )
    logger.info(result.stdout)
    assert result.status == 0
    assert 'FAIL' not in result.stdout

    # Check for expected files
    backup_dir = re.findall(fr'{subdir}\/satellite-backup-.*-[0-5][0-9]',
                            result.stdout)[0]
    files = sat_maintain.execute(f'ls -a {backup_dir}').stdout.split('\n')
    files = [i for i in files if not re.compile(r'^\.*$').search(i)]

    if sat_maintain.is_remote_db():
        expected_files = BASIC_FILES | REMOTE_SAT_FILES
    else:
        expected_files = (BASIC_FILES | OFFLINE_FILES
                          if backup_type == 'offline'
                          else BASIC_FILES | ONLINE_SAT_FILES)
    assert set(files).issuperset(expected_files | CONTENT_FILES), assert_msg
Example #25
def test_negative_restore_nodir(sat_maintain, setup_backup_tests):
    """Try to run restore with no source dir provided

    :id: dadc4e32-c0b8-427f-a449-4ae66fe09268

    :steps:
        1. try to run restore with no path argument provided

    :expectedresult:
        1. should fail with appropriate error message
    """
    result = sat_maintain.cli.Restore.run(
        backup_dir='',
        options={
            'assumeyes': True,
            'plaintext': True
        },
    )
    logger.info(result.stdout)
    assert result.status != 0
    assert NODIR_MSG in str(result.stderr)
Example #26
def test_positive_synchronize_rh_product_future_sync_date(module_org):
    """Create a sync plan with sync date in a future and sync one RH
    product with it automatically.

    :id: 6697a00f-2181-4c2b-88eb-2333268d780b

    :expectedresults: Product is synchronized successfully.

    :CaseLevel: System
    """
    delay = 2 * 60  # delay for sync date in seconds
    org = entities.Organization().create()
    with manifests.clone() as manifest:
        entities.Subscription().upload(data={'organization_id': org.id},
                                       files={'content': manifest.content})
    repo_id = enable_rhrepo_and_fetchid(
        basearch='x86_64',
        org_id=org.id,
        product=PRDS['rhel'],
        repo=REPOS['rhst7']['name'],
        reposet=REPOSET['rhst7'],
        releasever=None,
    )
    product = entities.Product(name=PRDS['rhel'], organization=org).search()[0]
    repo = entities.Repository(id=repo_id).read()
    if is_open('BZ:1695733'):
        logger.info('Setting seconds to zero as a workaround for BZ:1695733')
        sync_date = datetime.utcnow().replace(second=0) + timedelta(seconds=delay)
    else:
        # note: no trailing comma here, which would turn sync_date into a tuple
        sync_date = datetime.utcnow() + timedelta(seconds=delay)
    sync_plan = entities.SyncPlan(organization=org,
                                  enabled=True,
                                  interval='hourly',
                                  sync_date=sync_date).create()
    # Create and Associate sync plan with product
    sync_plan.add_products(data={'product_ids': [product.id]})
    # Verify product is not synced and doesn't have any content
    with pytest.raises(AssertionError):
        validate_task_status(repo.id, max_tries=1)
    validate_repo_content(repo, ['erratum', 'package', 'package_group'],
                          after_sync=False)
    # Wait quarter of expected time
    logger.info(
        f'Waiting {delay / 4} seconds to check product {product.name} was not synced'
    )
    sleep(delay / 4)
    # Verify product has not been synced yet
    with pytest.raises(AssertionError):
        validate_task_status(repo.id, max_tries=1)
    validate_repo_content(repo, ['erratum', 'package', 'package_group'],
                          after_sync=False)
    # Wait the rest of expected time
    logger.info(
        f'Waiting {delay * 3 / 4} seconds to check product {product.name} was synced'
    )
    sleep(delay * 3 / 4)
    # Verify product was synced successfully
    validate_task_status(repo.id, repo_backend_id=repo.backend_identifier)
    validate_repo_content(repo, ['erratum', 'package', 'package_group'])
Example #27
def test_positive_synchronize_custom_product_daily_recurrence(module_org):
    """Create a daily sync plan with current datetime as a sync date,
    add a custom product and verify the product gets synchronized on
    the next sync occurrence

    :id: d60e33a0-f75c-498e-9e6f-0a2025295a9d

    :expectedresults: Product is synchronized successfully.

    :CaseLevel: System
    """
    delay = 2 * 60
    product = entities.Product(organization=module_org).create()
    repo = entities.Repository(product=product).create()
    start_date = (datetime.utcnow().replace(second=0)
                  - timedelta(days=1) + timedelta(seconds=delay))
    # Create and Associate sync plan with product
    sync_plan = entities.SyncPlan(organization=module_org,
                                  enabled=True,
                                  interval='daily',
                                  sync_date=start_date).create()
    sync_plan.add_products(data={'product_ids': [product.id]})
    # Wait quarter of expected time
    logger.info(
        f"Waiting {(delay / 4)} seconds to check product {product.name}"
        f" was not synced by {sync_plan.name}")
    sleep(delay / 4)
    # Verify product is not synced and doesn't have any content
    with pytest.raises(AssertionError):
        validate_task_status(repo.id, module_org.id, max_tries=1)
    validate_repo_content(repo, ['erratum', 'package', 'package_group'],
                          after_sync=False)
    # Wait the rest of expected time
    logger.info(
        f"Waiting {(delay * 3 / 4)} seconds to check product {product.name}"
        f" was synced by {sync_plan.name}")
    sleep(delay * 3 / 4)
    # Verify product was synced successfully
    validate_task_status(repo.id, module_org.id)
    validate_repo_content(repo, ['erratum', 'package', 'package_group'])
Example #28
def align_to_satellite(worker_id, satellite_factory):
    """Attempt to align a Satellite to the current xdist worker"""
    # clear any hostname that may have been previously set
    settings.server.hostname = on_demand_sat = None

    if worker_id in ['master', 'local']:
        worker_pos = 0
    else:
        worker_pos = int(worker_id.replace('gw', ''))

    # attempt to add potential satellites from the broker inventory file
    if settings.server.inventory_filter:
        hosts = VMBroker().from_inventory(
            filter=settings.server.inventory_filter)
        settings.server.hostnames += [host.hostname for host in hosts]

    # attempt to align a worker to a satellite
    if settings.server.xdist_behavior == 'run-on-one' and settings.server.hostnames:
        settings.server.hostname = settings.server.hostnames[0]
    elif settings.server.hostnames and worker_pos < len(
            settings.server.hostnames):
        settings.server.hostname = settings.server.hostnames[worker_pos]
    elif settings.server.xdist_behavior == 'balance' and settings.server.hostnames:
        settings.server.hostname = random.choice(settings.server.hostnames)
    # get current satellite information
    elif settings.server.xdist_behavior == 'on-demand':
        on_demand_sat = satellite_factory()
        if on_demand_sat.hostname:
            settings.server.hostname = on_demand_sat.hostname
        # if no satellite was received, fallback to balance
        if not settings.server.hostname:
            settings.server.hostname = random.choice(settings.server.hostnames)
    logger.info(
        f'xdist worker {worker_id} was assigned hostname {settings.server.hostname}'
    )
    configure_airgun()
    configure_nailgun()
    yield
    if on_demand_sat and settings.server.auto_checkin:
        VMBroker(hosts=[on_demand_sat]).checkin()
Example #29
def test_positive_logging_from_foreman_proxy():
    """Check PUT to Smart Proxy API to refresh the features is logged and has request ID.

    :id: 0ecd8406-6cf1-4520-b8b6-8a164a1e60c2

    :expectedresults: line of log with PUT has request ID

    :CaseImportance: Medium
    """

    PUT_line_found = False
    request_id = None
    source_log_1 = '/var/log/foreman/production.log'
    test_logfile_1 = '/var/tmp/logfile_1_from_proxy'
    source_log_2 = '/var/log/foreman-proxy/proxy.log'
    test_logfile_2 = '/var/tmp/logfile_2_from_proxy'
    with ssh.get_connection() as connection:
        # get the number of lines in the source logs before the test
        line_count_start_1 = line_count(source_log_1, connection)
        line_count_start_2 = line_count(source_log_2, connection)
        # hammer command for this test
        result = connection.run('hammer proxy refresh-features --id 1')
        assert result.return_code == 0, "BASH command error?"
        # get the number of lines in the source logs after the test
        line_count_end_1 = line_count(source_log_1, connection)
        line_count_end_2 = line_count(source_log_2, connection)
        # get the log lines of interest, put them in test_logfile_1
        cut_lines(line_count_start_1, line_count_end_1, source_log_1,
                  test_logfile_1, connection)
        # get the log lines of interest, put them in test_logfile_2
        cut_lines(line_count_start_2, line_count_end_2, source_log_2,
                  test_logfile_2, connection)
    # use same location on remote and local for log file extract
    ssh.download_file(test_logfile_1)
    # use same location on remote and local for log file extract
    ssh.download_file(test_logfile_2)
    # search the log file extract for the line with PUT to host API
    with open(test_logfile_1) as logfile:
        for line in logfile:
            if re.search(r'Started PUT \"\/api\/smart_proxies\/1\/refresh',
                         line):
                logger.info('Found the line with PUT to foreman proxy API')
                PUT_line_found = True
                # Confirm the request ID was logged in the line with PUT
                match = re.search(r'\[I\|app\|\w{8}\]', line)
                assert match, "Request ID not found"
                logger.info("Request ID found for logging from foreman proxy")
                p = re.compile(r"\w{8}")
                result = p.search(line)
                request_id = result.group(0)
                break
    assert PUT_line_found, "The PUT command to refresh proxies was not found in logs."
    # search the local copy of proxy.log for the same request ID
    with open(test_logfile_2) as logfile:
        # str.find returns -1 (truthy) when absent, so use a membership test instead
        assert request_id in logfile.read(), "Request ID not found in proxy.log"
        logger.info("Request ID also found in proxy.log")
Example #30
def test_negative_restore_baddir(sat_maintain, setup_backup_tests):
    """Try to run restore with non-existing source dir provided

    :id: 65ccc0d0-ca43-4877-9b29-50037e378ca5

    :steps:
        1. try to run restore with non-existing path provided

    :expectedresult:
        1. should fail with appropriate error message
    """
    subdir = f'{BACKUP_DIR}backup-{gen_string("alpha")}'
    result = sat_maintain.cli.Restore.run(
        backup_dir=subdir,
        options={
            'assumeyes': True,
            'plaintext': True
        },
    )
    logger.info(result.stdout)
    assert result.status != 0
    assert BADDIR_MSG in str(result.stdout)