Code example #1
def test_pull_splitter_persistence(location):
    navigate_to(*location)
    # First we move the splitter to the hidden position by pulling it left twice
    pull_splitter_left()
    pull_splitter_left()
    navigate_to(Server, 'Dashboard')
    try:
        navigate_to(*location)
    except (TypeError, CannotScrollException):
        # this exception is expected here since
        # some navigation commands try to use accordion when it is hidden by splitter
        pass

    # Then we check the splitter's hidden position
    if not pytest.sel.elements("//div[@id='left_div'][contains(@class, 'hidden-md')]"):
        pytest.fail("Splitter did not persist when on hidden position!")
    # Then we iterate over all the other positions
    for position in ["col-md-2", "col-md-3", "col-md-4", "col-md-5"]:
        # Pull splitter right
        pull_splitter_right()
        navigate_to(Server, 'Dashboard')
        navigate_to(*location)
        # Then check its position
        if not pytest.sel.elements("//div[@id='left_div'][contains(@class, {})]"
                .format(unescape(quoteattr(position)))):
            pytest.fail("Splitter did not persist when on " + str(position) + " position!")
Code example #2
def proc_a(env, resource, prio):
    try:
        with resource.request(priority=prio) as req:
            yield req
            pytest.fail('Should have received an interrupt/preemption.')
    except simpy.Interrupt:
        pass
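A minimal driver for the snippet above, sketched under stated assumptions (the interrupter/blocker helpers are not part of the original simpy test): a sibling process interrupts proc_a while it is still blocked on its request, so the yield raises simpy.Interrupt and the pytest.fail line is never reached.

import simpy

def interrupter(env, victim):
    # After one time unit, interrupt proc_a while it still waits on its request.
    yield env.timeout(1)
    victim.interrupt('interrupt!')

env = simpy.Environment()
resource = simpy.PriorityResource(env, capacity=1)
blocker = resource.request(priority=-10)  # occupy the resource so proc_a blocks
proc = env.process(proc_a(env, resource, prio=0))
env.process(interrupter(env, proc))
env.run()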
Code example #3
    def _provisioner(template, provisioning_data, delayed=None):
        pytest.sel.force_navigate('infrastructure_provision_vms', context={
            'provider': provider,
            'template_name': template,
        })

        vm_name = provisioning_data["vm_name"]
        fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
        flash.assert_no_errors()

        request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
        if delayed is not None:
            total_seconds = (delayed - datetime.utcnow()).total_seconds()
            row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
            cells = {'Description': row_description}
            try:
                row, __ = wait_for(requests.wait_for_request, [cells],
                                   fail_func=requests.reload, num_sec=total_seconds, delay=5)
                pytest.fail("The provisioning was not postponed")
            except TimedOutError:
                pass
        logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
        wait_for(provider.mgmt.does_vm_exist, [vm_name], handle_exception=True, num_sec=600)

        # nav to requests page happens on successful provision
        logger.info('Waiting for cfme provision request for vm %s', vm_name)
        row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
        cells = {'Description': row_description}
        row, __ = wait_for(requests.wait_for_request, [cells],
                           fail_func=requests.reload, num_sec=900, delay=20)
        assert row.last_message.text == 'Vm Provisioned Successfully'
        return VM.factory(vm_name, provider)
Code example #4
File: test_freeze.py Project: alquerci/pip
def test_freeze_bazaar_clone(script, tmpdir):
    """
    Test freezing a Bazaar clone.

    """
    try:
        checkout_path = _create_test_package(script, vcs='bazaar')
    except OSError as e:
        pytest.fail('Invoking `bzr` failed: %s' % e)

    result = script.run(
        'bzr', 'checkout', checkout_path, 'bzr-package'
    )
    result = script.run(
        'python', 'setup.py', 'develop',
        cwd=script.scratch_path / 'bzr-package',
        expect_stderr=True,
    )
    result = script.pip('freeze', expect_stderr=True)
    expected = textwrap.dedent("""\
        ...-e bzr+file://...@1#egg=version_pkg
        ...""")
    _check_output(result.stdout, expected)

    result = script.pip(
        'freeze', '-f',
        '%s/#egg=django-wikiapp' % checkout_path,
        expect_stderr=True,
    )
    expected = textwrap.dedent("""\
        -f %(repo)s/#egg=django-wikiapp
        ...-e bzr+file://...@...#egg=version_pkg
        ...""" % {'repo': checkout_path})
    _check_output(result.stdout, expected)
Code example #5
    def test_implement_tokenize_words(self):
        """
        Ensure functionality is implemented.

        | *Test Suite ID* : L
        |
        | *Test Case Number* : 04
        |
        | *Description* : Ensure that the word level clean functionality
        |                 is implemented in the class.
        |                 Tests :func:`LeipzigPreprocessor._tokenize_words`
        |
        | *Preconditions* : LeipzigPreprocessor class instance exists.
        |
        | *Test Parameters* : sentence passed to function.
        |
        | *Test Data* : sentence=''
        |
        | *Expected Result* : NotImplementedError exception is not raised.
        |
        | *Actual Result* : NotImplementedError exception is not raised.
        |
        | **Status : Pass**
        |

        """
        testing_object = self.__class__.testing_obj
        try:
            testing_object._tokenize_words(sentence='')
        except NotImplementedError:
            pytest.fail('Not Implemented _tokenize_words function')
Code example #6
    def test_create_conf_py(self, conf_file, get_conf_py_path, _, get_config_params, create_index, docs_dir):
        """
        Test for a project without a ``conf.py`` file.

        When this happens, ``get_conf_py_path`` raises a
        ``ProjectConfigurationError``, which is captured by our own code, and
        a conf.py file is generated using our own template.

        This template should render properly in Python2 and Python3 without
        ``append_conf`` raising any kind of exception (we originally got a
        ``TypeError`` because of an encoding problem in Python3).
        """
        docs_dir.return_value = tempfile.mkdtemp()
        create_index.return_value = 'README.rst'
        get_config_params.return_value = {}
        get_conf_py_path.side_effect = ProjectConfigurationError
        conf_file.return_value = tempfile.mktemp()
        try:
            self.base_sphinx.append_conf()
        except Exception:
            pytest.fail('Exception was generated when append_conf called.')

        # Check that the content generated by our method is the same as what
        # we expect from a pre-generated file
        generated_conf_py = os.path.join(self.base_sphinx.docs_dir(), 'conf.py')
        expected_conf_py = os.path.join(os.path.dirname(__file__), '..', 'files', 'conf.py')
        with open(generated_conf_py) as gf, open(expected_conf_py) as ef:
            self.assertEqual(gf.read(), ef.read())
Code example #7
File: pytester.py Project: JanBednarik/pytest
    def fnmatch_lines(self, lines2):
        """Search the text for matching lines.

        The argument is a list of lines which have to match and can
        use glob wildcards.  If they do not match, pytest.fail() is
        called.  The matches and non-matches are also printed to
        stderr.

        """
        def show(arg1, arg2):
            py.builtin.print_(arg1, arg2, file=sys.stderr)
        lines2 = self._getlines(lines2)
        lines1 = self.lines[:]
        nextline = None
        extralines = []
        __tracebackhide__ = True
        for line in lines2:
            nomatchprinted = False
            while lines1:
                nextline = lines1.pop(0)
                if line == nextline:
                    show("exact match:", repr(line))
                    break
                elif fnmatch(nextline, line):
                    show("fnmatch:", repr(line))
                    show("   with:", repr(nextline))
                    break
                else:
                    if not nomatchprinted:
                        show("nomatch:", repr(line))
                        nomatchprinted = True
                    show("    and:", repr(nextline))
                extralines.append(nextline)
            else:
                pytest.fail("remains unmatched: %r, see stderr" % (line,))
Code example #8
File: quteprocess.py Project: Dietr1ch/qutebrowser
    def after_test(self):
        """Handle unexpected/skip logging and clean up after each test."""
        __tracebackhide__ = lambda e: e.errisinstance(pytest.fail.Exception)
        bad_msgs = [msg for msg in self._data
                    if self._is_error_logline(msg) and not msg.expected]

        try:
            call = self.request.node.rep_call
        except AttributeError:
            pass
        else:
            if call.failed or hasattr(call, 'wasxfail'):
                super().after_test()
                return

        try:
            if bad_msgs:
                text = 'Logged unexpected errors:\n\n' + '\n'.join(
                    str(e) for e in bad_msgs)
                # We'd like to use pytrace=False here but don't as a WORKAROUND
                # for https://github.com/pytest-dev/pytest/issues/1316
                pytest.fail(text)
            else:
                self._maybe_skip()
        finally:
            super().after_test()
Code example #9
File: test_builds.py Project: Zearin/pypackage
def verify_source(source_files, gz_path):
    """Ensure the std python dist files and source_files are in the .tar.gz."""

    f_name = glob.glob(os.path.join(gz_path, "*.tar.gz"))[0]
    with tarfile.open(f_name, "r:gz") as tar_file:
        tar_files = tar_file.getnames()

    pkg_full_name = os.path.basename(
        os.path.dirname(gz_path)
    ).split(".tar.gz")[0]
    egg_name = "{}.egg-info".format(pkg_full_name.rsplit("-")[0])  # fragile..
    source_files.extend([
        "PKG-INFO",
        egg_name,
        os.path.join(egg_name, "dependency_links.txt"),
        os.path.join(egg_name, "PKG-INFO"),
        os.path.join(egg_name, "SOURCES.txt"),
        os.path.join(egg_name, "top_level.txt"),
        "setup.cfg",
        "setup.py",
    ])
    assert len(tar_files) == len(source_files) + 1  # +1 for the base dir
    base_dir_skipped = False
    for tar_file in tar_files:
        assert tar_file.startswith(pkg_full_name)
        if os.path.sep in tar_file:
            tar_file = tar_file[tar_file.index(os.path.sep) + 1:]
            assert tar_file in source_files
        elif not base_dir_skipped:
            base_dir_skipped = True
        else:
            pytest.fail("{} not expected in source dist!".format(tar_file))
Code example #10
File: t5_ctrl.py Project: NickChen0113/s3ql
    def tst_ctrl_flush(self):

        try:
            s3ql.ctrl.main(['flushcache', self.mnt_dir])
        except:
            sys.excepthook(*sys.exc_info())
            pytest.fail("s3qlctrl raised exception")
Code example #11
File: conftest.py Project: luoch/dcos
    def destroy_marathon_app(self, app_name, timeout=300):
        """Remove a marathon app

        Abort the test if the removal was unsuccessful.

        Args:
            app_name: name of the application to remove
            timeout: seconds to wait for destruction before failing test
        """
        @retrying.retry(wait_fixed=1000, stop_max_delay=timeout*1000,
                        retry_on_result=lambda ret: not ret,
                        retry_on_exception=lambda x: False)
        def _destroy_complete(deployment_id):
            r = self.get('/marathon/v2/deployments', headers=self._marathon_req_headers())
            assert r.ok

            for deployment in r.json():
                if deployment_id == deployment.get('id'):
                    logging.info('Waiting for application to be destroyed')
                    return False
            logging.info('Application destroyed')
            return True

        r = self.delete('/marathon/v2/apps' + app_name, headers=self._marathon_req_headers())
        assert r.ok

        try:
            _destroy_complete(r.json()['deploymentId'])
        except retrying.RetryError:
            pytest.fail("Application destroy failed - operation was not "
                        "completed in {} seconds.".format(timeout))
Code example #12
def test_warning_no():
    """No shadow warning is raised"""
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        try:
            rm, gm, bm = src.read_masks()
        except NodataShadowWarning:
            pytest.fail("Unexpected NodataShadowWarning raised")
Code example #13
def test_ogr_fgdb_stress_2():
    if ogrtest.fgdb_drv is None:
        pytest.skip()

    ds_test = ogr.Open('tmp/test.gdb')
    ds_ref = ogr.Open('tmp/test.' + ogrtest.reference_ext)

    lyr_test = ds_test.GetLayer(0)
    lyr_ref = ds_ref.GetLayer(0)

    while True:
        f_test = lyr_test.GetNextFeature()
        f_ref = lyr_ref.GetNextFeature()
        # Both layers must run out of features at the same time.
        assert (f_test is None) == (f_ref is None)
        if f_test is None:
            break
        if f_test.GetFID() != f_ref.GetFID() or \
           f_test['str'] != f_ref['str'] or \
           ogrtest.check_feature_geometry(f_test, f_ref.GetGeometryRef()) != 0:
            f_test.DumpReadable()
            f_ref.DumpReadable()
            pytest.fail()

    for val in range(1000):
        lyr_test.SetAttributeFilter("str = '%d'" % val)
        lyr_ref.SetAttributeFilter("str = '%d'" % val)
        assert lyr_test.GetFeatureCount() == lyr_ref.GetFeatureCount(), val
Code example #14
File: quteprocess.py Project: AdaJass/qutebrowser
    def after_test(self, did_fail):  # pylint: disable=arguments-differ
        """Handle unexpected/skip logging and clean up after each test.

        Args:
            did_fail: Set if the main test failed already, then logged errors
                      are ignored.
        """
        __tracebackhide__ = True
        bad_msgs = [msg for msg in self._data
                    if self._is_error_logline(msg) and not msg.expected]

        if did_fail:
            super().after_test()
            return

        try:
            if bad_msgs:
                text = 'Logged unexpected errors:\n\n' + '\n'.join(
                    str(e) for e in bad_msgs)
                # We'd like to use pytrace=False here but don't as a WORKAROUND
                # for https://github.com/pytest-dev/pytest/issues/1316
                pytest.fail(text)
            else:
                self._maybe_skip()
        finally:
            super().after_test()
Code example #15
File: conftest.py Project: jayfk/pyinstaller
    def _test_executables(self, name, args, runtime, run_from_path):
        """
        Run created executable to make sure it works.

        Multipackage-tests generate more than one exe-file and all of
        them have to be run.

        :param args: CLI options to pass to the created executable.
        :param runtime: Time in milliseconds for how long to keep the executable running.

        :return: Exit code of the executable.
        """
        __tracebackhide__ = True
        # TODO implement runtime - kill the app (Ctrl+C) when time times out
        exes = self._find_executables(name)
        # Empty list means that PyInstaller probably failed to create any executable.
        assert exes != [], 'No executable file was found.'
        for exe in exes:
            # Try to find .toc log file. .toc log file has the same basename as exe file.
            toc_log = os.path.join(_LOGS_DIR, os.path.basename(exe) + '.toc')
            if os.path.exists(toc_log):
                if not self._examine_executable(exe, toc_log):
                    pytest.fail('Matching .toc of %s failed.' % exe)
            retcode = self._run_executable(exe, args, run_from_path, runtime)
            if retcode != 0:
                pytest.fail('Running exe %s failed with return-code %s.' %
                            (exe, retcode))
Code example #16
File: test_actions.py Project: seandst/cfme_tests
def test_action_untag(request, assign_policy_for_testing, vm, vm_off, vm_crud_refresh):
    """ Tests action untag

    Metadata:
        test_flag: actions, provision
    """
    tag_unassign_action = explorer.Action(
        fauxfactory.gen_alphanumeric(),
        action_type="Remove Tags",
        action_values={"cat_service_level": True}
    )
    assign_policy_for_testing.assign_actions_to_event("VM Power On", [tag_unassign_action])

    def finalize():
        assign_policy_for_testing.assign_events()
        tag_unassign_action.delete()
    request.addfinalizer(finalize)

    vm.start_vm()
    vm_crud_refresh()
    try:
        wait_for(
            lambda: not any(
                [tag.category == "service_level" and tag.tag_name == "gold" for tag in vm.soap.tags]
            ),
            num_sec=600,
            message="tag presence check"
        )
    except TimedOutError:
        pytest.fail("Tags were not unassigned!")
Code example #17
def test_vm_discovery(request, setup_provider, provider, vm_crud):
    """ Tests whether cfme will discover a vm change (add/delete) without being manually refreshed.

    Prerequisites:
        * Desired provider set up

    Steps:
        * Create a virtual machine on the provider.
        * Wait for the VM to appear
        * Delete the VM from the provider (not using CFME)
        * Wait for the VM to become Archived.

    Metadata:
        test_flag: discovery
    """

    @request.addfinalizer
    def _cleanup():
        vm_crud.delete_from_provider()
        if_scvmm_refresh_provider(provider)

    vm_crud.create_on_provider(allow_skip="default")
    if_scvmm_refresh_provider(provider)

    try:
        vm_crud.wait_to_appear(timeout=600, load_details=False)
    except TimedOutError:
        pytest.fail("VM was not found in CFME")
    vm_crud.delete_from_provider()
    if_scvmm_refresh_provider(provider)
    wait_for_vm_state_changes(vm_crud)
Code example #18
def test_ssa_users(provider, instance, soft_assert):
    """ Tests SSA fetches correct results for users list

    Metadata:
        test_flag: vm_analysis
    """
    username = fauxfactory.gen_alphanumeric()
    expected = None

    # In the Windows case we can't add new users (yet)
    # So we simply check that user list doesn't cause any Rails errors
    if instance.system_type != WINDOWS:
        # Add a new user
        instance.ssh.run_command("userdel {0} || useradd {0}".format(username))
        expected = instance.ssh.run_command("cat /etc/passwd | wc -l").output.strip('\n')

    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15, timeout="15m", fail_func=lambda: toolbar.select('Reload'))

    # Check that all data has been fetched
    current = instance.get_detail(properties=('Security', 'Users'))
    if instance.system_type != WINDOWS:
        assert current == expected

    # Make sure created user is in the list
    instance.open_details(("Security", "Users"))
    if instance.system_type != WINDOWS:
        if not instance.paged_table.find_row_on_all_pages('Name', username):
            pytest.fail("User {0} was not found".format(username))
Code example #19
def test_ssa_groups(provider, instance, soft_assert):
    """ Tests SSA fetches correct results for groups

    Metadata:
        test_flag: vm_analysis
    """
    group = fauxfactory.gen_alphanumeric()
    expected = None

    if instance.system_type != WINDOWS:
        # Add a new group
        instance.ssh.run_command("groupdel {0} || groupadd {0}".format(group))
        expected = instance.ssh.run_command("cat /etc/group | wc -l").output.strip('\n')

    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15, timeout="15m", fail_func=lambda: toolbar.select('Reload'))

    # Check that all data has been fetched
    current = instance.get_detail(properties=('Security', 'Groups'))
    if instance.system_type != WINDOWS:
        assert current == expected

    # Make sure created group is in the list
    instance.open_details(("Security", "Groups"))
    if instance.system_type != WINDOWS:
        if not instance.paged_table.find_row_on_all_pages('Name', group):
            pytest.fail("Group {0} was not found".format(group))
Code example #20
    def test_build_file_metadata_from_contents(self, metadata_fixtures):
        try:
            metadata = GitHubFileContentMetadata(metadata_fixtures['file_metadata_content_endpoint'])
        except Exception as exc:
            pytest.fail(str(exc))

        assert metadata.name == 'epsilon'
        assert metadata.path == '/epsilon'
        assert metadata.modified is None
        assert metadata.content_type is None
        assert metadata.size == 15
        assert metadata.size_as_int == 15
        assert type(metadata.size_as_int) == int
        assert metadata.etag == '/epsilon::bd4fb614678f544acb22bac6861a21108f1e5d10'
        assert metadata.extra == {
            'fileSha': 'bd4fb614678f544acb22bac6861a21108f1e5d10',
            'webView': None,
            'hashes': {
                'git': 'bd4fb614678f544acb22bac6861a21108f1e5d10',
            },
        }
        assert metadata.provider == 'github'

        assert metadata.commit is None
        assert metadata.ref is None
        assert metadata.web_view is None
Code example #21
    def test_file_metadata_with_ref(self, metadata_fixtures):
        try:
            metadata = GitHubFileTreeMetadata(metadata_fixtures['file_metadata_tree_endpoint'], ref="some-branch")
        except Exception as exc:
            pytest.fail(str(exc))

        assert metadata.name == 'README.md'
        assert metadata.path == '/README.md'
        assert metadata.modified is None
        assert metadata.content_type is None
        assert metadata.size == 38
        assert metadata.size_as_int == 38
        assert type(metadata.size_as_int) == int
        assert metadata.etag == '/README.md::d863d70539aa9fcb6b44b057221706f2ab18e341'
        assert metadata.extra == {
            'fileSha': 'd863d70539aa9fcb6b44b057221706f2ab18e341',
            'webView': None,
            'ref': 'some-branch',
            'hashes': {
                'git': 'd863d70539aa9fcb6b44b057221706f2ab18e341',
            },

        }
        assert metadata.provider == 'github'

        assert metadata.commit is None
        assert metadata.ref == 'some-branch'
        assert metadata.web_view is None

        json_api = metadata.json_api_serialized('mst3k')
        for actions, link in json_api['links'].items():
            assert re.search('[?&]ref=some-branch', link)
Code example #22
def test_abort_co_read(conn, monkeypatch):
    # We need to delay the write to ensure that we encounter a blocking read
    path = '/foo/wurfl'
    chunks = [300, 317, 283]
    delay = 10
    while True:
        monkeypatch.setattr(MockRequestHandler, 'do_GET',
                            get_chunked_GET_handler(path, chunks, delay))
        conn.send_request('GET', path)
        resp = conn.read_response()
        assert resp.status == 200
        cofun = conn.co_read(450)
        try:
            next(cofun)
        except StopIteration:
            # Not good, need to wait longer
            pass
        else:
            break
        finally:
            conn.disconnect()

        if delay > 5000:
            pytest.fail('no blocking read even with %f sec sleep' % delay)
        delay *= 2

    assert_raises(dugong.ConnectionClosed, next, cofun)
Code example #23
def test_insert(empty_warehouse):
    """
    Inserts a fact with MAX_ITERATIONS ^ 2 rows.
    """
    enable_logging()

    Warehouse.use(empty_warehouse)

    Store.build()
    stores = _get_instances('store')
    Store.insert(*stores)

    Product.build()
    products = _get_instances('product')
    Product.insert(*products)

    Sales.build()
    sales = _get_instances('sales')

    start_time = datetime.datetime.now()
    print('Starting bulk insert of fact at', start_time)

    try:
        Sales.insert(*sales)
    except OperationalError:
        pytest.fail('The connection broke.')

    end_time = datetime.datetime.now()
    print('Ending bulk insert of fact at', end_time)

    delta = end_time - start_time
    print('Time taken =', delta)
Code example #24
def test_action_tag(request, assign_policy_for_testing, vm, vm_off, vm_crud_refresh):
    """ Tests action tag

    Metadata:
        test_flag: actions, provision
    """
    if any(tag.category.display_name == "Service Level" and tag.display_name == "Gold"
           for tag in vm.crud.get_tags()):
        vm.crud.remove_tag(("Service Level", "Gold"))

    tag_assign_action = actions.Action(
        fauxfactory.gen_alphanumeric(),
        action_type="Tag",
        action_values={"tag": ("My Company Tags", "Service Level", "Gold")}
    )
    assign_policy_for_testing.assign_actions_to_event("VM Power On", [tag_assign_action])

    @request.addfinalizer
    def finalize():
        assign_policy_for_testing.assign_events()
        tag_assign_action.delete()

    vm.start_vm()
    vm_crud_refresh()
    try:
        wait_for(
            lambda: any(tag.category.display_name == "Service Level" and tag.display_name == "Gold"
                        for tag in vm.crud.get_tags()),
            num_sec=600,
            message="tag presence check"
        )
    except TimedOutError:
        pytest.fail("Tags were not assigned!")
Code example #25
File: t5_lock_rm.py Project: NickChen0113/s3ql
    def tst_lock_rm(self):

        # Extract tar
        tempdir = os.path.join(self.mnt_dir, 'lock_dir')
        filename = os.path.join(tempdir, 'myfile')
        os.mkdir(tempdir)
        with open(filename, 'w') as fh:
            fh.write('Hello, world')

        # copy
        try:
            s3ql.lock.main([tempdir])
        except:
            sys.excepthook(*sys.exc_info())
            pytest.fail("s3qllock raised exception")

        # Try to delete
        assert_raises(PermissionError, os.unlink, filename)

        # Try to write
        with pytest.raises(PermissionError):
            open(filename, 'w+').write('Hello')

        # delete properly
        try:
            s3ql.remove.main([tempdir])
        except:
            sys.excepthook(*sys.exc_info())
            pytest.fail("s3qlrm raised exception")

        assert 'lock_dir' not in llfuse.listdir(self.mnt_dir)
Code example #26
File: mrsid.py Project: AsgerPetersen/gdal
def test_mrsid_online_4():

    if gdaltest.jp2mrsid_drv is None:
        pytest.skip()

    if not gdaltest.download_file('http://www.openjpeg.org/samples/Bretagne2.j2k', 'Bretagne2.j2k'):
        pytest.skip()
    if not gdaltest.download_file('http://www.openjpeg.org/samples/Bretagne2.bmp', 'Bretagne2.bmp'):
        pytest.skip()

    # Checksum = 53186 on my PC
    tst = gdaltest.GDALTest('JP2MrSID', 'tmp/cache/Bretagne2.j2k', 1, None, filename_absolute=1)

    tst.testOpen()

    ds = gdal.Open('tmp/cache/Bretagne2.j2k')
    ds_ref = gdal.Open('tmp/cache/Bretagne2.bmp')
    maxdiff = gdaltest.compare_ds(ds, ds_ref, width=256, height=256)

    # Difference between the image before and after compression
    if maxdiff > 1:
        print(ds.GetRasterBand(1).Checksum())
        print(ds_ref.GetRasterBand(1).Checksum())
        pytest.fail('Image too different from reference')

    # Release the datasets only after the comparison so the failure path
    # above can still read the bands.
    ds = None
    ds_ref = None
Code example #27
    def _test_run(self, protocol):
        fh = open('/tmp/hb.txt', 'w')
        list_for_find = ['wrtest.com', 'some', 'nf', 'aaa.wrtest.com']
        for obj in list_for_find:
            fh.write("{0}\n".format(obj))
        fh.close()

        queue = HostsBruteJob()
        generator = FileGenerator('/tmp/hb.txt')
        queue.set_generator(generator)

        result = []
        thrd = HostsBruteThread(
            queue=queue,
            protocol=protocol,
            host='wrtest.com',
            template='@',
            mask_symbol='@',
            false_phrase='403 Forbidden',
            retest_codes='',
            delay=0,
            ignore_words_re='',
            counter=CounterMock(),
            result=result
        )
        thrd.setDaemon(True)
        thrd.start()

        start_time = int(time.time())
        while not thrd.done:
            if int(time.time()) - start_time > self.threads_max_work_time:
                pytest.fail("Thread work {0} secs".format(int(time.time()) - start_time))
            time.sleep(1)

        assert result == ['wrtest.com', 'aaa.wrtest.com']
Code example #28
def test_blocking_read(conn, monkeypatch):
    path = '/foo/wurfl'
    chunks = [120] * 10
    delay = 10

    while True:
        monkeypatch.setattr(MockRequestHandler, 'do_GET',
                            get_chunked_GET_handler(path, chunks, delay))
        conn.send_request('GET', path)

        resp = conn.read_response()
        assert resp.status == 200

        interrupted = 0
        parts = []
        while True:
            crt = conn.co_read(100)
            try:
                while True:
                    io_req = next(crt)
                    interrupted += 1
                    assert io_req.poll(5)
            except StopIteration as exc:
                buf = exc.value
                if not buf:
                    break
                parts.append(buf)
        assert not conn.response_pending()

        assert _join(parts) == b''.join(DUMMY_DATA[:x] for x in chunks)
        if interrupted >= 8:
            break
        elif delay > 5000:
            pytest.fail('no blocking read even with %f sec sleep' % delay)
        delay *= 2
Code example #29
File: mrsid.py Project: AsgerPetersen/gdal
def test_mrsid_2():

    if gdaltest.mrsid_drv is None:
        pytest.skip()

    ds = gdal.Open('data/mercator.sid')

    try:
        data = ds.ReadRaster(0, 0, 515, 515, buf_xsize=10, buf_ysize=10)
    except:
        pytest.fail('Small overview read failed: ' + gdal.GetLastErrorMsg())

    ds = None

    is_bytes = False
    if (isinstance(data, bytes) and not isinstance(data, str)):
        is_bytes = True

    # check that we got roughly the right values by checking mean.
    if is_bytes is True:
        total = sum(data)
    else:
        total = sum([ord(c) for c in data])

    mean = float(total) / len(data)

    assert mean >= 95 and mean <= 105, 'image mean out of range.'
Code example #30
    def test_build_file_metadata_from_tree(self, metadata_fixtures):
        try:
            metadata = GitHubFileTreeMetadata(metadata_fixtures['file_metadata_tree_endpoint'])
        except Exception as exc:
            pytest.fail(str(exc))

        assert metadata.name == 'README.md'
        assert metadata.path == '/README.md'
        assert metadata.modified is None
        assert metadata.content_type is None
        assert metadata.size == 38
        assert metadata.size_as_int == 38
        assert type(metadata.size_as_int) == int
        assert metadata.etag == '/README.md::d863d70539aa9fcb6b44b057221706f2ab18e341'
        assert metadata.extra == {
            'fileSha': 'd863d70539aa9fcb6b44b057221706f2ab18e341',
            'webView': None,
            'hashes': {
                'git': 'd863d70539aa9fcb6b44b057221706f2ab18e341',
            },
        }
        assert metadata.provider == 'github'

        assert metadata.commit is None
        assert metadata.ref is None
        assert metadata.web_view is None
Code example #31
def test_TDCT_correlationImport():
	if 'TDCT_correlation' not in globals():
		pytest.fail("TDCT_correlation import: {0}".format(TDCT_error))
Code example #32
def interruptee(env):
    try:
        yield env.timeout(10)
        pytest.fail('Expected an interrupt')
    except simpy.Interrupt as interrupt:
        assert interrupt.cause == 'interrupt!'
Code example #33
def test_ust_java_agent_interface(tmpdir, ust_label, tools_label, app_version,
                                  outcome):
    """
    Use the agent coming from ust_label, but run the app under the tools_label runtime using the ust agent.
    """

    nb_iter = 100
    nb_events = 3 * nb_iter

    # Prepare environment
    ust = ProjectFactory.get_precook(ust_label)
    tools = ProjectFactory.get_precook(tools_label)
    babeltrace = ProjectFactory.get_precook(Settings.default_babeltrace)

    tools_runtime_path = os.path.join(str(tmpdir), "tools")
    ust_runtime_path = os.path.join(str(tmpdir), "ust")
    app_path = os.path.join(str(tmpdir), "app")

    with Run.get_runtime(ust_runtime_path) as runtime_app, Run.get_runtime(
            tools_runtime_path) as runtime_tools:
        runtime_tools.add_project(tools)
        runtime_tools.add_project(babeltrace)

        runtime_app.add_project(ust)
        runtime_app.lttng_home = runtime_tools.lttng_home

        trace_path = os.path.join(runtime_tools.lttng_home, 'trace')

        # Make application using the ust runtime
        shutil.copytree(version_to_app[app_version], app_path)
        runtime_app.run("javac App.java", cwd=app_path)

        # Start lttng-sessiond
        sessiond = utils.sessiond_spawn(runtime_tools)

        # Create session using mi to get path and session name
        runtime_tools.run('lttng create trace --output={}'.format(trace_path))

        runtime_tools.run('lttng enable-event -j jello')
        runtime_tools.run('lttng start')

        # Steal the classpath from ust project
        ust_classpath = ust.special_env_variables['CLASSPATH']

        # Run application with tools runtime
        cmd = 'java App {}'.format(nb_iter)
        runtime_tools.run(cmd, cwd=app_path, classpath=ust_classpath)

        # Stop tracing
        runtime_tools.run('lttng stop')
        runtime_tools.run('lttng destroy -a')
        cp = runtime_tools.subprocess_terminate(sessiond)
        if cp.returncode != 0:
            pytest.fail("Sessiond return code")

        # Read trace with babeltrace and check for event count via number of line
        cmd = 'babeltrace {}'.format(trace_path)
        if outcome == "Success":
            assert (utils.file_contains(
                runtime_tools.get_subprocess_stderr_path(sessiond),
                ["New registration for pid"]))
            cp_process, cp_out, cp_err = runtime_tools.run(cmd)
            assert (utils.line_count(cp_out) == nb_events)
        else:
            if outcome == "Unsupported protocol":
                assert (not (utils.file_contains(
                    runtime_tools.get_subprocess_stderr_path(sessiond),
                    ["New registration for pid"])))
                cp_process, cp_out, cp_err = runtime_tools.run(cmd)
                assert (utils.line_count(cp_out) == 0)
Code example #34
def test_1(act: Action):
    pytest.fail("Not IMPLEMENTED")
Code example #35
def test_list_projects_summary_format(db: Session, client: TestClient) -> None:
    # create empty project
    empty_project_name = "empty-project"
    empty_project = mlrun.api.schemas.Project(
        metadata=mlrun.api.schemas.ProjectMetadata(name=empty_project_name), )
    response = client.post("/api/projects", json=empty_project.dict())
    assert response.status_code == HTTPStatus.OK.value

    # create project with resources
    project_name = "project-with-resources"
    project = mlrun.api.schemas.Project(
        metadata=mlrun.api.schemas.ProjectMetadata(name=project_name), )
    response = client.post("/api/projects", json=project.dict())
    assert response.status_code == HTTPStatus.OK.value

    # create functions for the project
    functions_count = 5
    _create_functions(client, project_name, functions_count)

    # create feature sets for the project
    feature_sets_count = 9
    _create_feature_sets(client, project_name, feature_sets_count)

    # create model artifacts for the project
    models_count = 4
    _create_artifacts(client, project_name, models_count,
                      mlrun.artifacts.model.ModelArtifact.kind)

    # create dataset artifacts for the project to make sure we're not mistakenly counting them
    _create_artifacts(client, project_name, 7,
                      mlrun.artifacts.dataset.DatasetArtifact.kind)

    # create runs for the project
    running_runs_count = 5
    _create_runs(
        client,
        project_name,
        running_runs_count,
        mlrun.runtimes.constants.RunStates.running,
    )

    # create completed runs for the project to make sure we're not mistakenly counting them
    _create_runs(client, project_name, 2,
                 mlrun.runtimes.constants.RunStates.completed)

    # create failed runs for the project from less than 24 hours ago
    recent_failed_runs_count = 6
    one_hour_ago = datetime.datetime.now() - datetime.timedelta(hours=1)
    _create_runs(
        client,
        project_name,
        recent_failed_runs_count,
        mlrun.runtimes.constants.RunStates.error,
        one_hour_ago,
    )

    # create aborted runs for the project from less than 24 hours ago - make sure we count them as well
    recent_aborted_runs_count = 6
    one_hour_ago = datetime.datetime.now() - datetime.timedelta(hours=1)
    _create_runs(
        client,
        project_name,
        recent_aborted_runs_count,
        mlrun.runtimes.constants.RunStates.aborted,
        one_hour_ago,
    )

    # create failed runs for the project from more than 24 hours ago to make sure we're not mistakenly counting them
    two_days_ago = datetime.datetime.now() - datetime.timedelta(hours=48)
    _create_runs(client, project_name, 3,
                 mlrun.runtimes.constants.RunStates.error, two_days_ago)

    # list projects with summary format
    response = client.get("/api/projects",
                          params={"format": mlrun.api.schemas.Format.summary})
    projects_output = mlrun.api.schemas.ProjectsOutput(**response.json())
    for index, project_summary in enumerate(projects_output.projects):
        if project_summary.name == empty_project_name:
            _assert_project_summary(project_summary, 0, 0, 0, 0, 0)
        elif project_summary.name == project_name:
            _assert_project_summary(
                project_summary,
                functions_count,
                feature_sets_count,
                models_count,
                recent_failed_runs_count + recent_aborted_runs_count,
                running_runs_count,
            )
        else:
            pytest.fail(
                f"Unexpected project summary returned: {project_summary}")
Code example #36
File: test_core.py Project: spencerng/albumentations
def test_check_bboxes_with_correct_values():
    try:
        check_bboxes([[0.1, 0.5, 0.8, 1.0], [0.2, 0.5, 0.5, 0.6, 99]])
    except Exception as e:  # skipcq: PYL-W0703
        pytest.fail("Unexpected Exception {!r}".format(e))
Code example #37
def check_image_not_in_list(glance, image):
    __tracebackhide__ = True
    image_list = parser.listing(glance('image-list'))
    if image['id'] in [x['ID'] for x in image_list]:
        pytest.fail('There is image {id} in list'.format(**image))
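Because the helper sets __tracebackhide__, a failure is reported at the call site rather than inside the helper. A hypothetical caller (the image-delete step is an assumption about the surrounding suite):

def test_image_deleted(glance, image):
    glance('image-delete %s' % image['id'])  # assumed invocation style
    check_image_not_in_list(glance, image)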
Code example #38
File: conftest.py Project: tbpassin/rst2pdf
def _fail(self, msg, output=None):
    pytest.fail(
        f'{msg}:\n\n{output.decode("utf-8")}' if output else msg,
        pytrace=False,
    )
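pytrace=False makes pytest print only the failure message, suppressing the Python traceback. A minimal illustration:

import pytest

def test_readable_failure():
    # Reports just the message, without the internal traceback.
    pytest.fail("PDF comparison failed; see build artifacts", pytrace=False)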
Code example #39
def test_valid_filemeta(new_rtstruct: RTStruct):
    try:
        validate_file_meta(new_rtstruct.ds.file_meta)
    except Exception:
        pytest.fail("Invalid file meta in RTStruct dataset")
Code example #40
File: conftest.py Project: tbpassin/rst2pdf
def compare_pdfs(path_a, path_b):
    try:
        pdf_a = fitz.open(path_a)
    except RuntimeError:
        pytest.fail(
            'Reference file at %r is not a valid PDF.' %
            (os.path.relpath(path_a, ROOT_DIR), ),
            pytrace=False,
        )

    try:
        pdf_b = fitz.open(path_b)
    except RuntimeError:
        pytest.fail(
            'Output file at %r is not a valid PDF.' %
            (os.path.relpath(path_b, ROOT_DIR), ),
            pytrace=False,
        )

    # sanity check

    assert pdf_a.isPDF
    assert pdf_b.isPDF

    # compare metadata

    assert _get_metadata(pdf_a) == _get_metadata(pdf_b)

    # compare content

    pages_a = _get_pages(pdf_a)
    pages_b = _get_pages(pdf_b)

    def fuzzy_coord_diff(coord_a, coord_b):
        diff = abs(coord_a - coord_b)
        threshold = 3  # 3 px is approximately 1.06mm
        assert (
            diff < threshold
        ), "Coordinates of the last printed block differs from the reference"

    def fuzzy_string_diff(string_a, string_b):
        a_is_image = string_a.startswith("<image: DeviceRGB")
        b_is_image = string_b.startswith("<image: DeviceRGB")
        if a_is_image and b_is_image:
            # We can't necessarily control the image metadata text in the block (e.g. from plantuml), so we do not
            # check it.
            return

        words_a = string_a.split()
        words_b = string_b.split()
        assert (words_a == words_b
                ), "Text of the last printed block differs from the reference"

    assert len(pages_a) == len(pages_b)
    page_no = 0
    for page_a, page_b in zip(pages_a, pages_b):
        page_no = page_no + 1
        print(f"++ Page {page_no} ++")
        print(f"page_a: {page_a}")
        print(f"page_b: {page_b}")
        print("number of blocks in page_a: %s" % len(page_a))
        print("number of blocks in page_b: %s" % len(page_b))
        assert len(page_a) == len(page_b)
        for block_a, block_b in zip(page_a, page_b):
            # each block has the following format:
            #
            # (x0, y0, x1, y1, "lines in block", block_type, block_no)
            #
            # block_type and block_no should remain unchanged, but it's
            # possible for the blocks to move around the document slightly and
            # the text refold without breaking entirely
            print(f"block_a: {block_a}")
            print(f"block_b: {block_b}")
            fuzzy_coord_diff(block_a[0], block_b[0])
            fuzzy_coord_diff(block_a[1], block_b[1])
            fuzzy_coord_diff(block_a[2], block_b[2])
            fuzzy_coord_diff(block_a[3], block_b[3])
            fuzzy_string_diff(block_a[4], block_b[4])
            assert block_a[5] == block_b[5]
            assert block_a[6] == block_b[6]
Code example #41
def _blocking_wrapper(*args, **kwargs):
    __tracebackhide__ = True
    __tracebackhide__  # Silence pyflakes
    pytest.fail("Database access not allowed, "
                'use the "django_db" mark, or the '
                '"db" or "transactional_db" fixtures to enable it.')
Code example #42
def build_generator():
    yield 1
    pytest.fail("generator should not consume past first item")
    yield 2
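A hypothetical consumer showing what this guard asserts: code under test may take the first item but must not advance the generator any further.

gen = build_generator()
assert next(gen) == 1   # fine: only the first item is consumed
gen.close()             # advancing again would trip the pytest.fail guard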
Code example #43
File: test.py Project: agmoss/Rental-Data-ETL
def test_connection_success():

    try:
        conn = Database.Database.connect()
    except MyError:
        pytest.fail("Unhandled exception")
Code example #44
    def check_data(obj, roundtrip=False):
        """Basic checks of the data"""

        assert (obj.staterror == 0).sum() == 714
        assert obj.staterror.sum() == pytest.approx(449.71593831)
        assert obj.staterror[0] == pytest.approx(0.0)
        assert obj.staterror[13] == pytest.approx(1.73205081)
        assert obj.staterror[51] == pytest.approx(3.1622776602)
        assert obj.staterror[1023] == pytest.approx(2.82842712)
        assert np.argmax(obj.staterror) == 51

        assert obj.syserror is None
        assert obj.bin_lo is None
        assert obj.bin_hi is None

        assert obj.exposure == pytest.approx(38564.608926889)
        assert np.log10(obj.backscal) == pytest.approx(-5.597491618115439)
        assert obj.areascal == pytest.approx(1.0)
        for f in ["grouped", "subtracted", "rate"]:
            assert isinstance(getattr(obj, f), bool)

        assert obj.grouped
        assert not obj.subtracted
        assert obj.rate

        assert obj.plot_fac == 0

        assert obj.channel.dtype == np.dtype("float64")
        assert obj.counts.dtype == np.dtype("float64")

        assert obj.channel == pytest.approx(np.arange(1, 1025))
        assert len(obj.counts) == 1024
        assert obj.counts[0:11] == pytest.approx(np.zeros(11))
        cvals = [1, 3, 2, 3, 7, 1, 6, 4, 4, 0]
        assert obj.counts[12:22] == pytest.approx(cvals)

        assert len(obj.grouping) == 1024
        if roundtrip:
            assert obj.quality is None
        else:
            assert len(obj.quality) == 1024
            assert obj.quality == pytest.approx(np.zeros(1024))

        if backend_is("crates"):
            assert obj.grouping.dtype == np.dtype("int16")
            if not roundtrip:
                assert obj.quality.dtype == np.dtype("int16")

        elif backend_is("pyfits"):
            assert obj.grouping.dtype == np.dtype(">i2")
            if not roundtrip:
                assert obj.quality.dtype == np.dtype(">i2")

        else:
            pytest.fail("Unrecognized IO backend")

        one, = np.where(obj.grouping == 1)
        expected = [0,  17,  21,  32,  39,  44,  48,  51,  54,  56,  59,  61,  65,
                    68,  71,  75,  78,  82,  88,  96, 101, 110, 116, 124, 130, 133,
                    139, 143, 150, 156, 164, 177, 186, 196, 211, 232, 244, 260, 276,
                    291, 323, 344, 368, 404, 450, 676]
        assert one == pytest.approx(expected)
        assert obj.grouping.sum() == -932
        assert set(obj.grouping) == {-1, 1}
Code example #45
    def check_if_should_run(self):
        # The logic here is this:
        #
        # * If any commit in poky, or a commit touching
        #   meta-mender/meta-mender-core/u-boot, is more recent than a certain
        #   time, carry out the test.
        #
        # * If a commit touching meta-mender/meta-mender-core/u-boot is older
        #   than the given time, but no upstream branch contains it, carry out
        #   the test.
        #
        # * Else, skip the test.
        #
        # The rationale is that the test is extremely time consuming, and
        # therefore we should try to avoid it if the branch has been stable for
        # a while. We include the second conditional above so that PRs are
        # always checked, even if they are old.

        # Number of days that must pass for the branch to be considered stable.
        days_to_be_old = 7

        # Find the repository directories we need
        [poky_dir, meta_mender_dir, _] = (subprocess.check_output(
            "bitbake-layers show-layers | awk '$1~/(^meta$|^meta-mender-core$)/ {print $2}' | xargs -n 1 dirname",
            cwd=os.environ["BUILDDIR"],
            shell=True,
        ).decode().split("\n"))

        # SHA from poky repository, limited by date.
        poky_rev = (subprocess.check_output(
            "git log -n1 --format=%%H --after=%d.days.ago HEAD" %
            days_to_be_old,
            shell=True,
            cwd=poky_dir,
        ).decode().strip())
        if poky_rev:
            print(
                "Running test_uboot_compile because poky commit is more recent than %d days."
                % days_to_be_old)
            return

        uboot_related_paths = [
            "meta-mender-core/recipes-bsp/u-boot",
            "tests/acceptance/test_uboot_automation.py",
            "tests/acceptance/files/Makefile.test_uboot_automation",
        ]

        for uboot_path in uboot_related_paths:
            path_to_check = os.path.join(meta_mender_dir, uboot_path)
            assert os.path.exists(path_to_check), (
                "%s does not exist in the repository. Should the list of paths be updated?"
                % path_to_check)

        # SHA from meta-mender repository, limited by date.
        meta_mender_uboot_rev = (subprocess.check_output(
            ("git log -n1 --format=%%H --after=%d.days.ago HEAD -- " +
             " ".join(uboot_related_paths)) % days_to_be_old,
            cwd=meta_mender_dir,
            shell=True,
        ).decode().strip())
        if meta_mender_uboot_rev:
            print(
                "Running test_uboot_compile because u-boot in meta-mender has been modified more recently than %d days ago."
                % days_to_be_old)
            return

        # SHA from meta-mender repository, not limited by date.
        meta_mender_uboot_rev = (subprocess.check_output(
            "git log -n1 --format=%H HEAD -- " + " ".join(uboot_related_paths),
            cwd=meta_mender_dir,
            shell=True,
        ).decode().strip())
        for remote in subprocess.check_output(["git",
                                               "remote"]).decode().split():
            url = subprocess.check_output("git config --get remote.%s.url" %
                                          remote,
                                          shell=True).decode()
            if "mendersoftware" in url:
                upstream_remote = remote
                break
        else:
            pytest.fail("Upstream remote not found! Should not happen.")

        contained_in = (subprocess.check_output("git branch -r --contains %s" %
                                                meta_mender_uboot_rev,
                                                shell=True).decode().split())
        is_upstream = False
        for branch in contained_in:
            if (branch.startswith("%s/" % upstream_remote)
                    and not branch.startswith("%s/pull/" % upstream_remote)
                    and not branch.startswith("%s/pr_" % upstream_remote)):
                is_upstream = True
                break

        if not is_upstream:
            print(
                "Running test_uboot_compile because meta-mender commit is not upstream yet."
            )
            return

        msg = "Skipping test_uboot_compile because u-boot commits are old and already upstream."
        print(msg)
        pytest.skip(msg)
Code example #46
def rest_running_report_finishes():
    response.task.reload()
    if "error" in response.task.status.lower():
        pytest.fail("Error when running report: `{}`".format(
            response.task.message))
    return response.task.state.lower() == 'finished'
Code example #47
File: test_multiforest.py Project: thalman/sssd
    def test_0001_multiforest(multihost, newhostname, adjoin):
        """
        :title: IDM-SSSD-TC: ad_provider: admultiforest
        :id:
        :setup:
          1. Configure two domain controllers in different forests
          2. Join client to the first domain
          3. Update sssd.conf for second domain
          4. Update krb5.conf for second domain
          5. Create krb principal and update sssd.conf
        :steps:
          1. Lookup user in the first domain
          2. Lookup user in the second domain
        :expectedresults:
          1. User is found in the first domain
          2. User is found in the second domain
        :customerscenario: True
        """
        adjoin(membersw='adcli')
        ad_domain = multihost.ad[0].domainname
        ad_server = multihost.ad[0].hostname
        # This must be the last AD server in the metadata file
        ad1_domain = multihost.ad[len(multihost.ad) - 1].domainname
        ad1_domain_upper = str.upper(ad1_domain)
        ad1_server = multihost.ad[len(multihost.ad) - 1].hostname
        ad1_password = multihost.ad[len(multihost.ad) - 1].ssh_password

        get_keytab = f'adcli join --host-keytab=/etc/krb5.keytab-domain1 ' \
                     f'{ad1_domain}'
        change_context = 'chcon -t krb5_keytab_t /etc/krb5.keytab-domain1'
        backup_krb5 = 'cp -rf /etc/krb5.conf /etc/krb5.conf.bak'
        restore_krb5 = 'mv /etc/krb5.conf.bak /etc/krb5.conf ; ' \
                       'restorecon -Rv /etc/krb5.conf'
        cleanup_krb5 = 'rm -rf /etc/krb5.keytab-domain1'
        edit_krb5_conf = f'sed -i "/domain_realm/a .{ad1_domain} ' \
                         f'= {ad1_domain_upper}" /etc/krb5.conf'
        edit1_krb5_conf = f'sed -i "/domain_realm/a {ad1_domain} ' \
                          f'= {ad1_domain_upper}" /etc/krb5.conf'

        try:
            multihost.client[0].run_command(get_keytab,
                                            stdin_text=ad1_password)
        except subprocess.CalledProcessError:
            pytest.fail("adcli join failed")
        multihost.client[0].run_command(backup_krb5, raiseonerr=False)
        multihost.client[0].run_command(edit_krb5_conf, raiseonerr=False)
        multihost.client[0].run_command(edit1_krb5_conf, raiseonerr=False)
        multihost.client[0].run_command(change_context, raiseonerr=False)

        # Configure sssd
        multihost.client[0].service_sssd('stop')
        client = sssdTools(multihost.client[0], multihost.ad[0])
        client.backup_sssd_conf()
        client.sssd_conf("sssd", {
            'domains': f'{ad_domain}, {ad1_domain}'}, action='update')
        domain_params = {
            'ad_domain': f'{ad_domain}',
            'dns_discovery_domain': f'{ad_domain}',
            'ad_server': f'{ad_server}',
            'debug_level': '9',
            'use_fully_qualified_names': 'True',
            'cache_credentials': 'True',
            'dyndns_update': 'True'}
        client.sssd_conf(
            f'domain/{ad_domain}', domain_params, action='update')
        domain1_params = {
            'ad_domain': f'{ad1_domain}',
            'ad_server': f'{ad1_server}',
            'krb5_realm': f'{ad1_domain_upper}',
            'debug_level': '9',
            'use_fully_qualified_names': 'False',
            'cache_credentials': 'True',
            'realmd_tags': 'manages-system joined-with-samba',
            'dyndns_update': 'False',
            'krb5_keytab': '/etc/krb5.keytab-domain1',
            'ldap_krb5_keytab': '/etc/krb5.keytab-domain1',
            'id_provider': 'ad',
            'access_provider': 'ad',
            'timeout': '3600',
            'krb5_store_password_if_offline': 'True',
            'default_shell': '/bin/bash',
            'ldap_id_mapping': 'True'}
        client.sssd_conf(
            f'domain/{ad1_domain}', domain1_params, action='update')
        client.clear_sssd_cache()
        multihost.client[0].service_sssd('start')
        time.sleep(10)
        # Search for the user in same forest and domain
        getent_domain_user1 = multihost.client[0].run_command(
            f'getent passwd user1@{ad_domain}', raiseonerr=False)
        getent_domain_user2 = multihost.client[0].run_command(
            f'getent passwd user2@{ad_domain}', raiseonerr=False)
        id_domain_user1 = multihost.client[0].run_command(
            f'id user1@{ad_domain}', raiseonerr=False)
        id_domain_user2 = multihost.client[0].run_command(
            f'id user2@{ad_domain}', raiseonerr=False)
        # Search for the user in a different forest and domain
        getent_domain1_user1 = multihost.client[0].run_command(
            f'getent passwd user1@{ad1_domain}', raiseonerr=False)
        getent_domain1_user2 = multihost.client[0].run_command(
            f'getent passwd user2@{ad1_domain}', raiseonerr=False)
        id_domain1_user1 = multihost.client[0].run_command(
            f'id user1@{ad1_domain}', raiseonerr=False)
        id_domain1_user2 = multihost.client[0].run_command(
            f'id user2@{ad1_domain}', raiseonerr=False)

        multihost.client[0].run_command(restore_krb5, raiseonerr=False)
        multihost.client[0].run_command(cleanup_krb5, raiseonerr=False)
        client.restore_sssd_conf()
        client.clear_sssd_cache()

        # Evaluate test results
        assert getent_domain_user1.returncode == 0
        assert getent_domain_user2.returncode == 0
        assert id_domain_user1.returncode == 0
        assert id_domain_user2.returncode == 0
        assert getent_domain1_user1.returncode == 0
        assert getent_domain1_user2.returncode == 0
        assert id_domain1_user1.returncode == 0
        assert id_domain1_user2.returncode == 0
Code example #48
File: testing.py Project: ConorPQuinn/NengoDecimal
    def __exit__(self, type, value, traceback):
        if not any(r.category is self.warning_type for r in self.record):
            pytest.fail("DID NOT RAISE")

        super(warns, self).__exit__(type, value, traceback)
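Hypothetical usage of this custom warns context manager: the body must emit a warning of the expected category, otherwise __exit__ fails the test with "DID NOT RAISE".

import warnings

with warns(UserWarning):
    warnings.warn("decoder changed", UserWarning)  # satisfies the check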
Code example #49
def test_attach_api(pyfile, target, wait_for_attach, is_attached, stop_method):
    @pyfile
    def code_to_debug():
        from debug_me import backchannel, ptvsd, scratchpad
        import sys
        import time

        _, host, port, wait_for_attach, is_attached, stop_method = sys.argv
        port = int(port)
        ptvsd.enable_attach((host, port))

        if wait_for_attach:
            backchannel.send("wait_for_attach")
            ptvsd.wait_for_attach()

        if is_attached:
            backchannel.send("is_attached")
            while not ptvsd.is_attached():
                print("looping until is_attached")
                time.sleep(0.1)

        if stop_method == "break_into_debugger":
            backchannel.send("break_into_debugger?")
            assert backchannel.receive() == "proceed"
            ptvsd.break_into_debugger()
            print("break")  # @break_into_debugger
        else:
            scratchpad["paused"] = False
            backchannel.send("loop?")
            assert backchannel.receive() == "proceed"
            while not scratchpad["paused"]:
                print("looping until paused")
                time.sleep(0.1)

    with debug.Session() as session:
        host, port = runners.attach_by_socket.host, runners.attach_by_socket.port
        session.config.update({"host": host, "port": port})

        backchannel = session.open_backchannel()
        session.spawn_debuggee([
            code_to_debug, host, port, wait_for_attach, is_attached,
            stop_method
        ])
        session.wait_for_enable_attach()

        session.connect_to_adapter((host, port))
        with session.request_attach():
            pass

        if wait_for_attach:
            assert backchannel.receive() == "wait_for_attach"

        if is_attached:
            assert backchannel.receive() == "is_attached"

        if stop_method == "break_into_debugger":
            assert backchannel.receive() == "break_into_debugger?"
            backchannel.send("proceed")
            session.wait_for_stop(expected_frames=[
                some.dap.frame(code_to_debug, "break_into_debugger")
            ])
        elif stop_method == "pause":
            assert backchannel.receive() == "loop?"
            backchannel.send("proceed")
            session.request("pause", freeze=False)
            session.wait_for_stop("pause")
            session.scratchpad["paused"] = True
        else:
            pytest.fail(stop_method)

        session.request_continue()
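
Stripped of the test harness, the attach API exercised above reduces to a few ptvsd calls on the debuggee side (a sketch; host, port, and the prints are illustrative):

import ptvsd

# Start listening so a debugger (e.g. VS Code) can attach over a socket.
ptvsd.enable_attach(("0.0.0.0", 5678))

# Optionally block until a debugger actually connects.
ptvsd.wait_for_attach()
assert ptvsd.is_attached()

# Programmatic breakpoint: execution pauses here under the debugger.
ptvsd.break_into_debugger()
print("resumed after the breakpoint")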
Code example #50
0
    def test_incorrect_Kconfig_setting(self, request, prepared_test_build,
                                       bitbake_image):
        """First produce a patch using the auto-patcher, then disable
        auto-patching and apply the patch with a slight modification that makes
        its settings incompatible, and check that this is detected."""

        bitbake_variables = get_bitbake_variables(
            request, "u-boot", prepared_test_build=prepared_test_build)

        # Only run if auto-configuration is on.
        if ("MENDER_UBOOT_AUTO_CONFIGURE" in bitbake_variables
                and bitbake_variables["MENDER_UBOOT_AUTO_CONFIGURE"] == "0"):
            pytest.skip(
                "Test is not applicable when MENDER_UBOOT_AUTO_CONFIGURE is off"
            )

        build_image(
            prepared_test_build["build_dir"],
            prepared_test_build["bitbake_corebase"],
            bitbake_image,
            target="-c save_mender_auto_configured_patch u-boot",
        )

        try:
            patch_name = os.path.join(bitbake_variables["WORKDIR"],
                                      "mender_auto_configured.patch")
            new_patch_name = "../../meta-mender-core/recipes-bsp/u-boot/patches/mender_broken_definition.patch"
            with open(patch_name) as patch, open(new_patch_name,
                                                 "w") as new_patch:
                for line in patch.readlines():
                    if line.startswith("+CONFIG_MTDIDS_DEFAULT="):
                        # Change to a wrong value:
                        new_patch.write(
                            '+CONFIG_MTDIDS_DEFAULT="nand0-wrongvalue=00000000.flash"\n'
                        )
                    else:
                        new_patch.write(line)

            # We need to add the patch using TEST_SRC_URI_APPEND to make sure
            # it is absolutely last; otherwise platform-specific layers may
            # add patches after us.

            # Normally changes to SRC_URI are picked up automatically, but since
            # we are sneaking it in via the TEST_SRC_URI_APPEND and its
            # associated python snippet, we need to clean the build manually.

            build_image(
                prepared_test_build["build_dir"],
                prepared_test_build["bitbake_corebase"],
                bitbake_image,
                [
                    'MENDER_UBOOT_AUTO_CONFIGURE_pn-u-boot = "0"',
                    'TEST_SRC_URI_APPEND_pn-u-boot = " file://%s"' %
                    os.path.basename(new_patch_name),
                ],
                target="-c clean u-boot",
            )

            try:
                build_image(
                    prepared_test_build["build_dir"],
                    prepared_test_build["bitbake_corebase"],
                    bitbake_image,
                    target="-c compile u-boot",
                )

                # Should never get here.
                pytest.fail(
                    "Bitbake succeeded even though we intentionally broke the patch!"
                )

            except subprocess.CalledProcessError as e:
                # A slightly risky check after upgrading the tests from
                # Python 2.7 to Python 3: the underlying
                # subprocess.check_output() call does not capture the output
                # unless the `capture_output` flag is set, so e.output may be
                # empty.
                if e.output:
                    assert e.output.find(
                        "Please fix U-Boot's configuration file") >= 0

        finally:
            build_image(
                prepared_test_build["build_dir"],
                prepared_test_build["bitbake_corebase"],
                bitbake_image,
                target="-c clean u-boot",
            )
            os.unlink(new_patch_name)
Code example #51
0
File: ogr_oapif.py Project: whatcoloris/gdal
def NO_LONGER_USED_test_ogr_opaif_fc_links_next_headers():
    if gdaltest.opaif_drv is None:
        pytest.skip()

    if gdaltest.webserver_port == 0:
        pytest.skip()

    handler = webserver.SequentialHandler()
    handler.add('GET', '/oapif/collections', 200,
                {'Content-Type': 'application/json'},
                '{ "collections" : [ { "name": "foo" }] }')
    with webserver.install_http_handler(handler):
        ds = ogr.Open('OAPIF:http://localhost:%d/oapif' %
                      gdaltest.webserver_port)
    lyr = ds.GetLayer(0)

    handler = webserver.SequentialHandler()
    handler.add(
        'GET', '/oapif/collections/foo/items?limit=10', 200,
        {'Content-Type': 'application/geo+json'},
        """{ "type": "FeatureCollection", "features": [
                    {
                        "type": "Feature",
                        "properties": {
                            "foo": "bar"
                        }
                    }
                ] }""")
    with webserver.install_http_handler(handler):
        assert lyr.GetLayerDefn().GetFieldCount() == 1

    handler = webserver.SequentialHandler()
    link_val = '<http://data.example.org/buildings.json>; rel="self"; type="application/geo+json"\r\nLink: <http://localhost:%d/oapif/foo_next>; rel="next"; type="application/geo+json"' % gdaltest.webserver_port
    handler.add(
        'GET', '/oapif/collections/foo/items?limit=10', 200, {
            'Content-Type': 'application/geo+json',
            'Link': link_val
        }, """{ "type": "FeatureCollection",
                    "features": [
                    {
                        "type": "Feature",
                        "properties": {
                            "foo": "bar"
                        }
                    }
                ] }""")
    with webserver.install_http_handler(handler):
        f = lyr.GetNextFeature()
    if f['foo'] != 'bar':
        f.DumpReadable()
        pytest.fail()

    handler = webserver.SequentialHandler()
    handler.add(
        'GET', '/oapif/foo_next', 200,
        {'Content-Type': 'application/geo+json'},
        """{ "type": "FeatureCollection",
                    "features": [
                    {
                        "type": "Feature",
                        "properties": {
                            "foo": "baz"
                        }
                    }
                ] }""")
    with webserver.install_http_handler(handler):
        f = lyr.GetNextFeature()
    if f['foo'] != 'baz':
        f.DumpReadable()
        pytest.fail()
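
From the client side, the behavior under test (OGR transparently following the rel="next" link while iterating) would look roughly like this (a sketch; the endpoint URL is illustrative):

from osgeo import ogr

ds = ogr.Open('OAPIF:http://localhost:8080/oapif')  # illustrative endpoint
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
while f is not None:
    print(f['foo'])  # further pages are fetched on demand during iteration
    f = lyr.GetNextFeature()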
Code example #52
0
def test_ogrinfo_24():
    if test_cli_utilities.get_ogrinfo_path() is None:
        pytest.skip()

    f = open('tmp/test_ogrinfo_24.vrt', 'wt')
    f.write("""<OGRVRTDataSource>
    <Metadata>
        <MDI key="foo">bar</MDI>
    </Metadata>
    <Metadata domain="other_domain">
        <MDI key="baz">foo</MDI>
    </Metadata>
    <OGRVRTLayer name="poly">
        <Metadata>
            <MDI key="bar">baz</MDI>
        </Metadata>
        <SrcDataSource relativeToVRT="1" shared="1">../../ogr/data/poly.shp</SrcDataSource>
        <SrcLayer>poly</SrcLayer>
  </OGRVRTLayer>
</OGRVRTDataSource>""")
    f.close()

    ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() +
                               ' -ro -al tmp/test_ogrinfo_24.vrt -so',
                               check_memleak=False)
    expected_ret = """INFO: Open of `tmp/test_ogrinfo_24.vrt'
      using driver `OGR_VRT' successful.
Metadata:
  foo=bar

Layer name: poly
Metadata:
  bar=baz
Geometry: Polygon
Feature Count: 10
Extent: (478315.531250, 4762880.500000) - (481645.312500, 4765610.500000)
Layer SRS WKT:
PROJCS["OSGB 1936 / British National Grid",
    GEOGCS["OSGB 1936",
        DATUM["OSGB_1936",
            SPHEROID["Airy 1830",6377563.396,299.3249646,
                AUTHORITY["EPSG","7001"]],
            TOWGS84[446.448,-125.157,542.06,0.15,0.247,0.842,-20.489],
            AUTHORITY["EPSG","6277"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4277"]],
    PROJECTION["Transverse_Mercator"],
    PARAMETER["latitude_of_origin",49],
    PARAMETER["central_meridian",-2],
    PARAMETER["scale_factor",0.9996012717],
    PARAMETER["false_easting",400000],
    PARAMETER["false_northing",-100000],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH],
    AUTHORITY["EPSG","27700"]]
"""
    expected_lines = expected_ret.splitlines()
    lines = ret.splitlines()
    for i, exp_line in enumerate(expected_lines):
        if exp_line != lines[i]:
            if gdaltest.is_travis_branch('mingw'):
                return 'expected_fail'
            pytest.fail(ret)
    ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() +
                               ' -ro -al tmp/test_ogrinfo_24.vrt -so -mdd all',
                               check_memleak=False)
    expected_ret = """INFO: Open of `tmp/test_ogrinfo_24.vrt'
      using driver `OGR_VRT' successful.
Metadata:
  foo=bar
Metadata (other_domain):
  baz=foo

Layer name: poly
Metadata:
  bar=baz
Geometry: Polygon
Feature Count: 10
Extent: (478315.531250, 4762880.500000) - (481645.312500, 4765610.500000)
Layer SRS WKT:
PROJCS["OSGB 1936 / British National Grid",
    GEOGCS["OSGB 1936",
        DATUM["OSGB_1936",
            SPHEROID["Airy 1830",6377563.396,299.3249646,
                AUTHORITY["EPSG","7001"]],
            TOWGS84[446.448,-125.157,542.06,0.15,0.247,0.842,-20.489],
            AUTHORITY["EPSG","6277"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4277"]],
    PROJECTION["Transverse_Mercator"],
    PARAMETER["latitude_of_origin",49],
    PARAMETER["central_meridian",-2],
    PARAMETER["scale_factor",0.9996012717],
    PARAMETER["false_easting",400000],
    PARAMETER["false_northing",-100000],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH],
    AUTHORITY["EPSG","27700"]]
AREA: Real (12.3)
EAS_ID: Integer64 (11.0)
PRFEDEA: String (16.0)
"""
    expected_lines = expected_ret.splitlines()
    lines = ret.splitlines()
    for i, exp_line in enumerate(expected_lines):
        if exp_line != lines[i]:
            if gdaltest.is_travis_branch('mingw'):
                return 'expected_fail'
            pytest.fail(ret)

    ret = gdaltest.runexternal(test_cli_utilities.get_ogrinfo_path() +
                               ' -ro -al tmp/test_ogrinfo_24.vrt -so -nomd',
                               check_memleak=False)
    expected_ret = """INFO: Open of `tmp/test_ogrinfo_24.vrt'
      using driver `OGR_VRT' successful.

Layer name: poly
Geometry: Polygon
Feature Count: 10
Extent: (478315.531250, 4762880.500000) - (481645.312500, 4765610.500000)
Layer SRS WKT:
PROJCS["OSGB 1936 / British National Grid",
    GEOGCS["OSGB 1936",
        DATUM["OSGB_1936",
            SPHEROID["Airy 1830",6377563.396,299.3249646,
                AUTHORITY["EPSG","7001"]],
            TOWGS84[446.448,-125.157,542.06,0.15,0.247,0.842,-20.489],
            AUTHORITY["EPSG","6277"]],
        PRIMEM["Greenwich",0,
            AUTHORITY["EPSG","8901"]],
        UNIT["degree",0.0174532925199433,
            AUTHORITY["EPSG","9122"]],
        AUTHORITY["EPSG","4277"]],
    PROJECTION["Transverse_Mercator"],
    PARAMETER["latitude_of_origin",49],
    PARAMETER["central_meridian",-2],
    PARAMETER["scale_factor",0.9996012717],
    PARAMETER["false_easting",400000],
    PARAMETER["false_northing",-100000],
    UNIT["metre",1,
        AUTHORITY["EPSG","9001"]],
    AXIS["Easting",EAST],
    AXIS["Northing",NORTH],
    AUTHORITY["EPSG","27700"]]
AREA: Real (12.3)
EAS_ID: Integer64 (11.0)
PRFEDEA: String (16.0)
"""
    expected_lines = expected_ret.splitlines()
    lines = ret.splitlines()
    for i, exp_line in enumerate(expected_lines):
        if exp_line != lines[i]:
            if gdaltest.is_travis_branch('mingw'):
                return 'expected_fail'
            pytest.fail(ret)

    os.unlink('tmp/test_ogrinfo_24.vrt')
Code example #53
0
def test_openapi_tools_validate_v3():
    ma_plugin = MarshmallowPlugin()
    spec = APISpec(
        title='Pets',
        version='0.1',
        plugins=(ma_plugin, ),
        openapi_version='3.0.0',
    )
    # openapi = ma_plugin.openapi

    spec.definition('Category', schema=CategorySchema)
    spec.definition('Pet', schema=PetSchemaV3)

    spec.add_path(
        view=None,
        path='/category/{category_id}',
        operations={
            'get': {
                'parameters': [
                    {
                        'name': 'q',
                        'in': 'query',
                        'schema': {
                            'type': 'string'
                        },
                    },
                    {
                        'name': 'category_id',
                        'in': 'path',
                        'required': True,
                        'schema': {
                            'type': 'string'
                        },
                    },
                ],  # + openapi.schema2parameters(PageSchema, default_in='query'),
                'responses': {
                    200: {
                        'description': 'success',
                        'content': {
                            'application/json': {
                                'schema': PetSchemaV3,
                            },
                        },
                    },
                },
            },
            'post': {
                'parameters': ([
                    {
                        'name': 'category_id',
                        'in': 'path',
                        'required': True,
                        'schema': {
                            'type': 'string'
                        },
                    },
                ]),
                'requestBody': {
                    'content': {
                        'application/json': {
                            'schema': CategorySchema,
                        },
                    },
                },
                'responses': {
                    201: {
                        'description': 'created',
                        'content': {
                            'application/json': {
                                'schema': PetSchemaV3,
                            },
                        },
                    },
                },
            },
        },
    )
    try:
        utils.validate_spec(spec)
    except exceptions.OpenAPIError as error:
        pytest.fail(str(error))
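
validate_spec raises an OpenAPIError on structural problems; to inspect the generated document itself, you would typically dump the spec built above (a sketch; to_dict() is the old apispec API used by this snippet):

import json

print(json.dumps(spec.to_dict(), indent=2))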
Code example #54
0
def test_put_get_large_files_gcp(tmpdir, conn_cnx, db_parameters,
                                 is_public_test, enable_gcs_downscoped):
    """[gcp] Puts and Gets Large files."""
    if enable_gcs_downscoped and is_public_test:
        pytest.xfail(
            "Server need to update with merged change. Expected release version: 4.41.0"
        )
    number_of_files = 3
    number_of_lines = 200000
    tmp_dir = generate_k_lines_of_n_files(number_of_lines,
                                          number_of_files,
                                          tmp_dir=str(tmpdir.mkdir('data')))
    folder_name = random_string(5, 'test_put_get_large_files_gcp_')

    files = os.path.join(tmp_dir, 'file*')
    output_dir = os.path.join(tmp_dir, 'output_dir')
    os.makedirs(output_dir)

    class cb(SnowflakeProgressPercentage):
        def __init__(self, filename, filesize, **_):
            pass

        def __call__(self, bytes_amount):
            pass

    def run(cnx, sql):
        return cnx.cursor().execute(sql.format(files=files,
                                               dir=folder_name,
                                               output_dir=output_dir),
                                    _put_callback_output_stream=sys.stdout,
                                    _get_callback_output_stream=sys.stdout,
                                    _get_callback=cb,
                                    _put_callback=cb).fetchall()

    with conn_cnx() as cnx:
        try:
            try:
                run(
                    cnx,
                    f'ALTER SESSION SET GCS_USE_DOWNSCOPED_CREDENTIAL = {enable_gcs_downscoped}'
                )
            except ProgrammingError as e:
                if enable_gcs_downscoped:
                    # If the parameter is not available yet, fall back to the
                    # old behavior; only re-raise when downscoped credentials
                    # were explicitly requested.
                    raise e
            all_recs = run(cnx, "PUT file://{files} @~/{dir}")
            assert all([rec[6] == 'UPLOADED' for rec in all_recs])

            for _ in range(60):
                for _ in range(100):
                    all_recs = run(cnx, "LIST @~/{dir}")
                    if len(all_recs) == number_of_files:
                        break
                    # The files may not be listed right after the PUT command
                    # because GCS blobs are synchronized eventually.
                    time.sleep(1)
                else:
                    # wait for another second and retry.
                    # this could happen if the files are partially available
                    # but not all.
                    time.sleep(1)
                    continue
                break  # success
            else:
                pytest.fail(
                    'cannot list all files. Potentially '
                    'PUT command missed uploading Files: {}'.format(all_recs))
            all_recs = run(cnx, "GET @~/{dir} file://{output_dir}")
            assert len(all_recs) == number_of_files
            assert all([rec[2] == 'DOWNLOADED' for rec in all_recs])
        finally:
            run(cnx, "RM @~/{dir}")
Code example #55
0
def test_other_docs(mod, capsys):
    fail, total = doctest.testmod(mod, optionflags=doctest.REPORT_NDIFF)
    if fail > 0:
        captured = capsys.readouterr()
        pytest.fail("{} out of {} examples failed:\n{}\n{}".format(
            fail, total, captured.err, captured.out), pytrace=False)
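
Any module handed to this runner only needs doctest-style examples in its docstrings, e.g. (an illustrative module):

def add(a, b):
    """Return the sum of a and b.

    >>> add(2, 3)
    5
    """
    return a + b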
Code example #56
0
File: ogr_oapif.py Project: whatcoloris/gdal
def test_ogr_opaif_fc_links_next_geojson():
    if gdaltest.opaif_drv is None:
        pytest.skip()

    if gdaltest.webserver_port == 0:
        pytest.skip()

    handler = webserver.SequentialHandler()
    handler.add('GET', '/oapif/collections', 200,
                {'Content-Type': 'application/json'},
                '{ "collections" : [ { "name": "foo" }] }')
    with webserver.install_http_handler(handler):
        ds = ogr.Open('OAPIF:http://localhost:%d/oapif' %
                      gdaltest.webserver_port)
    lyr = ds.GetLayer(0)

    handler = webserver.SequentialHandler()
    handler.add(
        'GET', '/oapif/collections/foo/items?limit=10', 200,
        {'Content-Type': 'application/geo+json'},
        """{ "type": "FeatureCollection", "features": [
                    {
                        "type": "Feature",
                        "properties": {
                            "foo": "bar"
                        }
                    }
                ] }""")
    with webserver.install_http_handler(handler):
        assert lyr.GetLayerDefn().GetFieldCount() == 1

    handler = webserver.SequentialHandler()
    handler.add(
        'GET', '/oapif/collections/foo/items?limit=10', 200,
        {'Content-Type': 'application/geo+json'},
        """{ "type": "FeatureCollection",
                    "links" : [
                        { "rel": "next", "type": "application/geo+json", "href": "http://localhost:%d/oapif/foo_next" }
                    ],
                    "features": [
                    {
                        "type": "Feature",
                        "properties": {
                            "foo": "bar"
                        }
                    }
                ] }""" % gdaltest.webserver_port)
    with webserver.install_http_handler(handler):
        f = lyr.GetNextFeature()
    if f['foo'] != 'bar':
        f.DumpReadable()
        pytest.fail()

    handler = webserver.SequentialHandler()
    handler.add(
        'GET', '/oapif/foo_next', 200,
        {'Content-Type': 'application/geo+json'},
        """{ "type": "FeatureCollection",
                    "features": [
                    {
                        "type": "Feature",
                        "properties": {
                            "foo": "baz"
                        }
                    }
                ] }""")
    with webserver.install_http_handler(handler):
        f = lyr.GetNextFeature()
    if f['foo'] != 'baz':
        f.DumpReadable()
        pytest.fail()
Code example #57
0
def test_grep(config, test_list):
    cards = []
    runner = CliRunner()
    # Test no-flag behavior
    cards.append(test_list.add_card('GREP_TESTING'))
    args = ['--board', config.test_board, 'grep', '--json', 'GREP_TEST']
    result = runner.invoke(cli, args)
    assert result.exit_code == 0, result.output
    try:
        grep_results = json.loads(result.output)
    except json.decoder.JSONDecodeError:
        print(result.output)
        [c.delete() for c in cards]
        pytest.fail('Output of `{0}` is not valid JSON'.format(' '.join(args)))
    assert len(grep_results) == 1
    # Test -i/--insensitive
    cards.append(test_list.add_card('grep_t'))
    args = ['--board', config.test_board, 'grep', '--json', '-i', 'gReP.t']
    result = runner.invoke(cli, args)
    assert result.exit_code == 0, result.output
    try:
        grep_results = json.loads(result.output)
    except json.decoder.JSONDecodeError:
        print(result.output)
        [c.delete() for c in cards]
        pytest.fail('Output of `{0}` is not valid JSON'.format(' '.join(args)))
    assert len(grep_results) == 2
    # Test -e/--regexp
    cards.append(test_list.add_card('foo'))
    cards.append(test_list.add_card('bar'))
    cards.append(test_list.add_card('baz'))
    args = [
        '--board', config.test_board, 'grep', '--json', '-e', 'foo', '-e',
        'bar'
    ]
    result = runner.invoke(cli, args)
    assert result.exit_code == 0, result.output
    try:
        grep_results = json.loads(result.output)
    except json.decoder.JSONDecodeError:
        print(result.output)
        [c.delete() for c in cards]
        pytest.fail('Output of `{0}` is not valid JSON'.format(' '.join(args)))
    assert len(grep_results) == 2
    args = [
        '--board', config.test_board, 'grep', '--json', '-i', '-e', 'grep',
        '-e', 'ba[rz]'
    ]
    result = runner.invoke(cli, args)
    assert result.exit_code == 0, result.output
    try:
        grep_results = json.loads(result.output)
    except json.decoder.JSONDecodeError:
        print(result.output)
        [c.delete() for c in cards]
        pytest.fail('Output of `{0}` is not valid JSON'.format(' '.join(args)))
    # grep_t, GREP_TESTING, bar, and baz
    assert len(grep_results) == 4
    # Test -e and an argument given at the same time
    args = [
        '--board', config.test_board, 'grep', '--json', '-i', '-e', 'foo',
        '-e', 'grep', 'ba[rz]'
    ]
    result = runner.invoke(cli, args)
    assert result.exit_code == 0, result.output
    try:
        grep_results = json.loads(result.output)
    except json.decoder.JSONDecodeError:
        print(result.output)
        [c.delete() for c in cards]
        pytest.fail('Output of `{0}` is not valid JSON'.format(' '.join(args)))
    # grep_t, GREP_TESTING, foo, bar, and baz
    assert len(grep_results) == 5
    # Test -c/--count
    args = ['--board', config.test_board, 'grep', '-c', 'foo|grep|ba[rz]']
    result = runner.invoke(cli, args)
    assert result.exit_code == 0, result.output
    # Test whether the count matches our expectation
    assert int(result.output) == 4
    # Cleanup
    for c in cards:
        c.delete()
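
The parse-or-fail block repeated above could be factored into a small helper (a sketch; it assumes the same cards, args, and CliRunner result objects as in the test):

import json

import pytest


def load_json_or_fail(result, args, cards):
    """Parse CLI output as JSON, deleting the test cards before failing."""
    try:
        return json.loads(result.output)
    except json.decoder.JSONDecodeError:
        print(result.output)
        for c in cards:
            c.delete()
        pytest.fail('Output of `{0}` is not valid JSON'.format(' '.join(args)))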
Code example #58
0
def test_openapi_tools_validate_v2():
    ma_plugin = MarshmallowPlugin()
    spec = APISpec(
        title='Pets',
        version='0.1',
        plugins=(ma_plugin, ),
        openapi_version='2.0',
    )
    openapi = ma_plugin.openapi

    spec.definition('Category', schema=CategorySchema)
    spec.definition('Pet',
                    schema=PetSchema,
                    extra_fields={'discriminator': 'name'})

    spec.add_path(
        view=None,
        path='/category/{category_id}',
        operations={
            'get': {
                'parameters': [
                    {
                        'name': 'q',
                        'in': 'query',
                        'type': 'string'
                    },
                    {
                        'name': 'category_id',
                        'in': 'path',
                        'required': True,
                        'type': 'string'
                    },
                    openapi.field2parameter(
                        field=fields.List(
                            fields.Str(),
                            validate=validate.OneOf(['freddie', 'roger']),
                            location='querystring',
                        ),
                        name='body',
                        use_refs=False,
                    ),
                ] + openapi.schema2parameters(PageSchema, default_in='query'),
                'responses': {
                    200: {
                        'schema': PetSchema,
                        'description': 'A pet',
                    },
                },
            },
            'post': {
                'parameters': ([{
                    'name': 'category_id',
                    'in': 'path',
                    'required': True,
                    'type': 'string'
                }] + openapi.schema2parameters(CategorySchema,
                                               default_in='body')),
                'responses': {
                    201: {
                        'schema': PetSchema,
                        'description': 'A pet',
                    },
                },
            },
        },
    )
    try:
        utils.validate_spec(spec)
    except exceptions.OpenAPIError as error:
        pytest.fail(str(error))
Code example #59
0
def test_simple():
    to_child = mp_context.Queue()
    from_child = mp_context.Queue()

    proc = AsyncProcess(target=feed, args=(to_child, from_child))
    assert not proc.is_alive()
    assert proc.pid is None
    assert proc.exitcode is None
    assert not proc.daemon
    proc.daemon = True
    assert proc.daemon

    wr1 = weakref.ref(proc)
    wr2 = weakref.ref(proc._process)

    # join() before start()
    with pytest.raises(AssertionError):
        yield proc.join()

    yield proc.start()
    assert proc.is_alive()
    assert proc.pid is not None
    assert proc.exitcode is None

    t1 = time()
    yield proc.join(timeout=0.02)
    dt = time() - t1
    assert 0.2 >= dt >= 0.01
    assert proc.is_alive()
    assert proc.pid is not None
    assert proc.exitcode is None

    # setting daemon attribute after start()
    with pytest.raises(AssertionError):
        proc.daemon = False

    to_child.put(5)
    assert from_child.get() == 5

    # child should be stopping now
    t1 = time()
    yield proc.join(timeout=10)
    dt = time() - t1
    assert dt <= 1.0
    assert not proc.is_alive()
    assert proc.pid is not None
    assert proc.exitcode == 0

    # join() again
    t1 = time()
    yield proc.join()
    dt = time() - t1
    assert dt <= 0.6

    del proc
    gc.collect()
    if wr1() is not None:
        # Help diagnosing
        from types import FrameType
        p = wr1()
        if p is not None:
            rc = sys.getrefcount(p)
            refs = gc.get_referrers(p)
            del p
            print("refs to proc:", rc, refs)
            frames = [r for r in refs if isinstance(r, FrameType)]
            for i, f in enumerate(frames):
                print("frames #%d:" % i,
                      f.f_code.co_name, f.f_code.co_filename, sorted(f.f_locals))
        pytest.fail("AsyncProcess should have been destroyed")
    t1 = time()
    while wr2() is not None:
        yield gen.sleep(0.01)
        gc.collect()
        dt = time() - t1
        assert dt < 2.0
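
The lifecycle the test walks through (start, join with a timeout, clean exit) can be sketched on its own, in the Tornado coroutine style that matches the yields above (assuming AsyncProcess comes from distributed.process, as in the distributed test suite; the target function is illustrative):

from tornado import gen
from tornado.ioloop import IOLoop

from distributed.process import AsyncProcess


def target():
    print("hello from the child process")


@gen.coroutine
def main():
    proc = AsyncProcess(target=target)
    yield proc.start()         # resolves once the child process is running
    yield proc.join(timeout=10)
    assert not proc.is_alive()
    assert proc.exitcode == 0


IOLoop.current().run_sync(main)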
Code example #60
0
def test_core_docs(mod, capsys):
    fail, total = doctest.testmod(mod)
    if fail > 0:
        captured = capsys.readouterr()
        pytest.fail("{} out of {} examples failed:\n{}\n{}".format(
            fail, total, captured.err, captured.out), pytrace=False)