class VersionTests(unittest.TestCase):
    """A set of tests to ensure that the version command runs in the way
    that we expect.
    """
    def setUp(self):
        self.runner = CliRunner()

    def test_version_command(self):
        """Establish that the version command returns the output we
        expect.
        """
        # Set up output from the /config/ endpoint in Tower and
        # invoke the command.
        with client.test_mode as t:
            t.register_json('/config/', {'version': '4.21'})
            result = self.runner.invoke(version)

            # Verify that we got the output we expected.
            self.assertEqual(result.exit_code, 0)
            self.assertEqual(
                result.output.strip(),
                'Ansible Tower 4.21\nTower CLI %s' % tower_cli.__version__,
            )

    def test_cannot_connect(self):
        """Establish that the version command gives a nice error in cases
        where it cannot connect to Tower.
        """
        with mock.patch.object(client, 'get') as get:
            get.side_effect = requests.exceptions.RequestException
            result = self.runner.invoke(version)
            self.assertEqual(result.exit_code, 1)
            self.assertIn('Could not connect to Ansible Tower.', result.output)
Example #2
def test_error_verbosity_003(caplog):
    """
    Testing debug level verbosity on setting error
    """
    runner = CliRunner()

    # Temporary isolated current dir
    with runner.isolated_filesystem():
        test_cwd = os.getcwd()

        # Debug level verbosity (-v 5)
        result = runner.invoke(cli_frontend, ['-v 5', 'compile'])

        error_msg = 'Unable to find any settings in directory: {}'.format(
            test_cwd
        )

        assert result.exit_code == 1

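        # caplog.record_tuples entries are (logger_name, level, message);
        # level 20 is logging.INFO and 50 is logging.CRITICAL.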
        assert caplog.record_tuples == [
            (
                'boussole',
                20,
                'Building project'
            ),
            (
                'boussole',
                50,
                error_msg
            )
        ]
        assert error_msg in result.output
        assert 'Aborted!' in result.output
Example #3
def test_error_verbosity_001(caplog, options, filename):
    """
    Testing default verbosity (aka INFO level) on setting error with
    different backends
    """
    runner = CliRunner()

    # Temporary isolated current dir
    with runner.isolated_filesystem():
        test_cwd = os.getcwd()

        # Default verbosity
        result = runner.invoke(cli_frontend, ['compile']+options)

        assert result.exit_code == 1

        assert caplog.record_tuples == [
            (
                'boussole',
                20,
                'Building project'
            ),
            (
                'boussole',
                50,
                'Unable to find any settings in directory: {}'.format(test_cwd)
            )
        ]


        assert 'Aborted!' in result.output
Example #4
def test_images(monkeypatch):
    image = MagicMock()
    image.id = 'ami-123'
    image.name = 'BrandNewImage'
    image.creationDate = datetime.datetime.utcnow().isoformat('T') + 'Z'

    old_image_still_used = MagicMock()
    old_image_still_used.id = 'ami-456'
    old_image_still_used.name = 'OldImage'
    old_image_still_used.creationDate = (datetime.datetime.utcnow() - datetime.timedelta(days=30)).isoformat('T') + 'Z'

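    # A running instance still references the old image, so both AMIs are expected in the listing.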
    instance = MagicMock()
    instance.id = 'i-777'
    instance.image_id = 'ami-456'
    instance.tags = {'aws:cloudformation:stack-name': 'mystack'}

    ec2 = MagicMock()
    ec2.get_all_images.return_value = [image, old_image_still_used]
    ec2.get_only_instances.return_value = [instance]
    monkeypatch.setattr('boto.cloudformation.connect_to_region', lambda x: MagicMock())
    monkeypatch.setattr('boto.ec2.connect_to_region', lambda x: ec2)
    monkeypatch.setattr('boto.iam.connect_to_region', lambda x: MagicMock())

    runner = CliRunner()

    with runner.isolated_filesystem():
        result = runner.invoke(cli, ['images', '--region=myregion'], catch_exceptions=False)

    assert 'ami-123' in result.output
    assert 'ami-456' in result.output
    assert 'mystack' in result.output
Example #5
def test_error_verbosity_002(caplog):
    """
    Testing silent on setting error
    """
    runner = CliRunner()

    # Temporary isolated current dir
    with runner.isolated_filesystem():
        test_cwd = os.getcwd()

        # Silent
        result = runner.invoke(cli_frontend, ['-v 0', 'compile'])

        error_msg = 'Unable to find any settings in directory: {}'.format(
            test_cwd
        )

        assert result.exit_code == 1

        assert caplog.record_tuples == [
            (
                'boussole',
                50,
                error_msg
            )
        ]

        # Totally silent output except for the message from click.Abort()
        assert error_msg not in result.output
        assert 'Aborted!' in result.output
Example #6
def test_format_jpeg(tmpdir):
    outputname = str(tmpdir.join('stacked.jpg'))
    runner = CliRunner()
    result = runner.invoke(
        bands.stack,
        ['tests/data/RGB.byte.tif', outputname, '--format', 'JPEG'])
    assert result.exit_code == 0
Example #7
def test_error(tmpdir):
    outputname = str(tmpdir.join('stacked.tif'))
    runner = CliRunner()
    result = runner.invoke(
        bands.stack,
        ['tests/data/RGB.byte.tif', outputname, '--driver', 'BOGUS'])
    assert result.exit_code == 1
Example #8
def test_get_password_from_cache(monkeypatch):
    user = 'my_user'  # the cached-password message asserted below expects this username
    resource = 'http://example.com'

    @doubleclick.click.command()
    @doubleclick.click.pass_context
    def fake_app(ctx):
        ctx.obj = {}
        x = utils.get_password(user, resource)
        click.echo('Password is {}'.format(x))
        monkeypatch.setattr(doubleclick.click, 'prompt', blow_up)

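        # click.prompt is now patched out, so the second lookup must be served
        # from the in-memory password cache rather than prompting again.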
        assert (user, 'example.com') in ctx.obj['passwords']
        x = utils.get_password(user, resource)
        click.echo('Password is {}'.format(x))

    runner = CliRunner()
    result = runner.invoke(fake_app, input='my_password\n')
    assert not result.exception
    assert result.output.splitlines() == [
        'Server password for {} at host {}: '.format(user, 'example.com'),
        'Save this password in the keyring? [y/N]: ',
        'Password is my_password',
        'debug: Got password for my_user from internal cache',
        'Password is my_password'
    ]
Example #9
def test_get_password_from_command(tmpdir):
    username = 'my_username'  # the helper script below checks $1 against this value
    resource = 'http://example.com'
    password = '******'
    filename = 'command.sh'

    filepath = str(tmpdir) + '/' + filename
    f = open(filepath, 'w')
    f.write('#!/bin/sh\n'
            '[ "$1" != "my_username" ] && exit 1\n'
            '[ "$2" != "example.com" ] && exit 1\n'
            'echo "{}"'.format(password))
    f.close()

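    # Make the helper script executable so it can be run as the configured password command.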
    st = os.stat(filepath)
    os.chmod(filepath, st.st_mode | stat.S_IEXEC)

    @doubleclick.click.command()
    @doubleclick.click.pass_context
    def fake_app(ctx):
        ctx.obj = {'config': ({'password_command': filepath}, {}, {})}
        _password = utils.get_password(username, resource)
        assert _password == password

    runner = CliRunner()
    result = runner.invoke(fake_app)
    assert not result.exception
Example #10
def test_init_without_cloud_server(
    config, push_design_documents, get_or_create, generate_config
):
    runner = CliRunner()

    generate_config.return_value = {"test": {"test": "test"}}
    httpretty.register_uri(
        httpretty.GET, "http://localhost:5984/_config/test/test",
        body='"test_val"'
    )
    httpretty.register_uri(
        httpretty.PUT, "http://localhost:5984/_config/test/test"
    )

    # Show -- Should throw an error because no local server is selected
    res = runner.invoke(show)
    assert res.exit_code, res.output

    # Init -- Should work and push the design documents but not replicate
    # anything
    res = runner.invoke(init)
    assert res.exit_code == 0, res.exception or res.output
    assert get_or_create.call_count == len(all_dbs)
    assert push_design_documents.call_count == 1
    push_design_documents.reset_mock()

    # Show -- Should work
    res = runner.invoke(show)
    assert res.exit_code == 0, res.exception or res.output

    # Init -- Should throw an error because a different database is already
    # selected
    res = runner.invoke(init, ["--db_url", "http://test.test:5984"])
    assert res.exit_code, res.output
Example #11
def test_set_keyring_password(monkeypatch):
    class KeyringMock(object):
        def get_password(self, resource, username):
            assert resource == utils.password_key_prefix + 'example.com'
            assert username == 'foouser'
            return None

        def set_password(self, resource, username, password):
            assert resource == utils.password_key_prefix + 'example.com'
            assert username == 'foouser'
            assert password == 'hunter2'

    monkeypatch.setattr(utils, 'keyring', KeyringMock())

    @doubleclick.click.command()
    @doubleclick.click.pass_context
    def fake_app(ctx):
        ctx.obj = {}
        x = utils.get_password('foouser', 'http://example.com/a/b')
        click.echo('password is ' + x)

    runner = CliRunner()
    result = runner.invoke(fake_app, input='hunter2\ny\n')
    assert not result.exception
    assert result.output == (
        'Server password for foouser at host example.com: \n'
        'Save this password in the keyring? [y/N]: y\n'
        'password is hunter2\n'
    )
Example #12
def test_with_successful_deploy_sans_runtime_bytecode(project):
    chain = project.get_chain('testrpc')

    exports = []

    with chain:
        Math = chain.provider.get_contract_factory('Math')

        Math.bytecode_runtime = None
        assert Math.bytecode_runtime is None

        @click.command()
        def wrapper():
            math_contract = deploy_contract_and_verify(
                chain,
                contract_name='Math',
                ContractFactory=Math,
            )
            exports.append(math_contract)
            print("~~{0}~~".format(math_contract.address))

        runner = CliRunner()
        result = runner.invoke(wrapper, [])

    assert result.exit_code == 0, str(result.output) + '\n' + str(result.exception)
    assert len(exports) == 1
    math_contract = exports[0]
    expected = "~~{0}~~".format(math_contract.address)
    assert expected in result.output
    assert "Verified contract bytecode" not in result.output
    assert "No runtime available" in result.output
Example #13
def test_cli():
    runner = CliRunner()
    result = runner.invoke(cli,
        ['20141201', '20141216', 'tests/test.geojson', 'tests/lc8_test.csv']
        )
    assert result.exit_code == 0
    assert result.output == "20141201 - 20141216 - Rate of clouds: 7.76\n"
Example #14
def test_call_click_commands(divio_project, command):
    current_dir = os.getcwd()
    os.chdir(os.path.join(current_dir, divio_project))
    runner = CliRunner()
    result = runner.invoke(cli.cli, command)
    os.chdir(current_dir)
    assert result.exit_code == 0
Example #15
def test_deploying_contract_with_error_during_deploy_sanity_check(project):
    """
    Just a sanity check that the `ThrowsInConstructor` contract can be
    successfully deployed.
    """
    chain = project.get_chain('testrpc')

    exports = []

    with chain:
        ThrowsInConstructor = chain.provider.get_contract_factory('ThrowsInConstructor')

        @click.command()
        def wrapper():
            math_contract = deploy_contract_and_verify(
                chain,
                contract_name='ThrowsInConstructor',
                ContractFactory=ThrowsInConstructor,
                deploy_args=[False],
            )
            exports.append(math_contract)
            print("~~{0}~~".format(math_contract.address))

        runner = CliRunner()
        result = runner.invoke(wrapper, [])

    assert result.exit_code == 0
    assert exports
    assert "Verified contract bytecode" in result.output
    assert "No runtime available" not in result.output
Example #16
def test_returned_filesize():
    runner = CliRunner()

    result = runner.invoke(
        cli,
        ['search',
        environ.get('SENTINEL_USER'),
        environ.get('SENTINEL_PASSWORD'),
        'tests/map.geojson',
        '--url', 'https://scihub.copernicus.eu/dhus/',
        '-s', '20141205',
        '-e', '20141208',
        '-q', 'producttype=GRD']
        )
    expected = "1 scenes found with a total size of 0.50 GB"
    assert result.output.split("\n")[-2] == expected

    result = runner.invoke(
        cli,
        ['search',
        environ.get('SENTINEL_USER'),
        environ.get('SENTINEL_PASSWORD'),
        'tests/map.geojson',
        '--url', 'https://scihub.copernicus.eu/dhus/',
        '-s', '20140101',
        '-e', '20141231',
        '-q', 'producttype=GRD']
        )
    expected = "20 scenes found with a total size of 11.06 GB"
    assert result.output.split("\n")[-2] == expected
Example #17
def test_cli():
    runner = CliRunner()
    result = runner.invoke(
        cli,
        ['search',
        environ.get('SENTINEL_USER'),
        environ.get('SENTINEL_PASSWORD'),
        'tests/map.geojson']
        )

    assert result.exit_code == 0

    result = runner.invoke(
        cli,
        ['search',
        environ.get('SENTINEL_USER'),
        environ.get('SENTINEL_PASSWORD'),
        'tests/map.geojson',
        '--url', 'https://scihub.copernicus.eu/dhus/']
        )
    assert result.exit_code == 0

    result = runner.invoke(
        cli,
        ['search',
        environ.get('SENTINEL_USER'),
        environ.get('SENTINEL_PASSWORD'),
        'tests/map.geojson',
        '-q', 'producttype=GRD,polarisationmode=HH']
        )
    assert result.exit_code == 0
Example #18
def test_cli_shapes_bbox():
    """JSON text sequences of bboxes are output."""
    runner = CliRunner()
    result = runner.invoke(
        cli, ['shapes', '[106, 193, 9]', '--seq', '--bbox', '--mercator', '--precision', '3'])
    assert result.exit_code == 0
    assert result.output == '\x1e\n[-11740727.545, 4852834.052, -11662456.028, 4931105.569]\n'
Example #19
def test_cli_shapes_props_fid():
    runner = CliRunner()
    result = runner.invoke(
        cli, ['shapes', '{"tile": [106, 193, 9], "properties": {"title": "foo"}, "id": "42"}'])
    assert result.exit_code == 0
    assert '"title": "foo"' in result.output
    assert '"id": "42"' in result.output
Example #20
def test_cli_shapes_indentation():
    """Output is indented."""
    runner = CliRunner()
    result = runner.invoke(
        cli, ['shapes', '--indent', '8'], "[106, 193, 9]")
    assert result.exit_code == 0
    assert '        "type": "Feature"' in result.output.strip()
Example #21
def test_cli_shapes_collect():
    """Shapes are collected into a feature collection."""
    runner = CliRunner()
    result = runner.invoke(
        cli, ['shapes', '--collect', '--feature'], "[106, 193, 9]")
    assert result.exit_code == 0
    assert 'FeatureCollection' in result.output
Example #22
def test_cli_quadkey_failure():
    """Abort when an invalid quadkey is passed"""
    runner = CliRunner()
    result = runner.invoke(
        cli, ['quadkey', 'lolwut'])
    assert result.exit_code == 2
    assert "lolwut" in result.output
Example #23
def test_cli_tiles_geosjon():
    collection = '{"features": [{"geometry": {"coordinates": [[[-105.46875, 39.909736], [-105.46875, 40.446947], [-104.765625, 40.446947], [-104.765625, 39.909736], [-105.46875, 39.909736]]], "type": "Polygon"}, "id": "(106, 193, 9)", "properties": {"title": "XYZ tile (106, 193, 9)"}, "type": "Feature"}], "type": "FeatureCollection"}'
    runner = CliRunner()
    result = runner.invoke(
        cli, ['tiles', '9'], collection)
    assert result.exit_code == 0
    assert result.output == '[106, 193, 9]\n[106, 194, 9]\n'
Example #24
def test_cli_tiles_multi_bounds_seq():
    """A JSON text sequence can be used as input."""
    runner = CliRunner()
    result = runner.invoke(
        cli, ['tiles', '14'], '\x1e\n[-105, 39.99, -104.99, 40]\n\x1e\n[-105, 39.99, -104.99, 40]')
    assert result.exit_code == 0
    assert len(result.output.strip().split('\n')) == 4
Example #25
def test_cli_tiles_bounds():
    runner = CliRunner()
    result = runner.invoke(
        cli, ['tiles', '--with-bounds', '14'], '[-105, 39.99, -104.99, 40]')
    assert result.exit_code == 0
    first, last = result.output.strip().split('\n')
    assert [round(x, 3) for x in json.loads(first)][3:] == [-105.007, 39.994, -104.985, 40.011]
Example #26
def test_print_basic(monkeypatch):
    monkeypatch.setattr('boto.cloudformation.connect_to_region', lambda x: MagicMock())
    monkeypatch.setattr('boto.iam.connect_to_region', lambda x: MagicMock())

    data = {'SenzaInfo': {'StackName': 'test'}, 'SenzaComponents': [{'Configuration': {'Type': 'Senza::Configuration',
                                                                                       'ServerSubnets': {
                                                                                           'eu-west-1': [
                                                                                               'subnet-123']}}},
                                                                    {'AppServer': {
                                                                        'Type': 'Senza::TaupageAutoScalingGroup',
                                                                        'InstanceType': 't2.micro',
                                                                        'Image': 'AppImage',
                                                                        'TaupageConfig': {'runtime': 'Docker',
                                                                                          'source': 'foo/bar'}}}]}

    runner = CliRunner()

    with runner.isolated_filesystem():
        with open('myapp.yaml', 'w') as fd:
            yaml.dump(data, fd)

        result = runner.invoke(cli, ['print', 'myapp.yaml', '--region=myregion', '123', '1.0-SNAPSHOT'],
                               catch_exceptions=False)

    assert 'AWSTemplateFormatVersion' in result.output
    assert 'subnet-123' in result.output
Example #27
def test_print_replace_mustache(monkeypatch):
    sg = MagicMock()
    sg.name = 'app-master-mind'
    sg.id = 'sg-007'

    monkeypatch.setattr('boto.cloudformation.connect_to_region', lambda x: MagicMock())
    monkeypatch.setattr('boto.ec2.connect_to_region', lambda x: MagicMock(get_all_security_groups=lambda: [sg]))
    monkeypatch.setattr('boto.iam.connect_to_region', lambda x: MagicMock())
    data = {'SenzaInfo': {'StackName': 'test',
                          'Parameters': [{'ApplicationId': {'Description': 'Application ID from kio'}}]},
            'SenzaComponents': [{'Configuration': {'ServerSubnets': {'eu-west-1': ['subnet-123']},
                                                   'Type': 'Senza::Configuration'}},
                                {'AppServer': {'Image': 'AppImage',
                                               'InstanceType': 't2.micro',
                                               'SecurityGroups': ['app-{{Arguments.ApplicationId}}'],
                                               'IamRoles': ['app-{{Arguments.ApplicationId}}'],
                                               'TaupageConfig': {'runtime': 'Docker',
                                                                 'source': 'foo/bar'},
                                               'Type': 'Senza::TaupageAutoScalingGroup'}}]
            }

    runner = CliRunner()

    with runner.isolated_filesystem():
        with open('myapp.yaml', 'w') as fd:
            yaml.dump(data, fd)

        result = runner.invoke(cli, ['print', 'myapp.yaml', '--region=myregion', '123', 'master-mind'],
                               catch_exceptions=False)
    assert 'AWSTemplateFormatVersion' in result.output
    assert 'subnet-123' in result.output
    assert 'app-master-mind' in result.output
    assert 'sg-007' in result.output
Example #28
def test_cli_tiles_point_geojson():
    runner = CliRunner()
    result = runner.invoke(
        cli, ['tiles', '14'],
        '{"type":"geometry","coordinates":[14.0859, 5.798]}')
    assert result.exit_code == 0
    assert result.output == '[8833, 7927, 14]\n'
Example #29
def test_delete(monkeypatch):
    cf = MagicMock()
    stack = MagicMock(stack_name='test-1')
    cf.list_stacks.return_value = [stack]
    monkeypatch.setattr('boto.cloudformation.connect_to_region', lambda x: cf)
    monkeypatch.setattr('boto.iam.connect_to_region', lambda x: MagicMock())

    runner = CliRunner()

    data = {'SenzaInfo': {'StackName': 'test'}}

    with runner.isolated_filesystem():
        with open('myapp.yaml', 'w') as fd:
            yaml.dump(data, fd)
        result = runner.invoke(cli, ['delete', 'myapp.yaml', '--region=myregion', '1'],
                               catch_exceptions=False)
        assert 'OK' in result.output

        cf.list_stacks.return_value = [stack, stack]
        result = runner.invoke(cli, ['delete', 'myapp.yaml', '--region=myregion'],
                               catch_exceptions=False)
        assert 'Please use the "--force" flag if you really want to delete multiple stacks' in result.output

        result = runner.invoke(cli, ['delete', 'myapp.yaml', '--region=myregion', '--force'],
                               catch_exceptions=False)
        assert 'OK' in result.output
Example #30
def test_cli_shapes_compact():
    """Output is compact."""
    runner = CliRunner()
    result = runner.invoke(
        cli, ['shapes', '--compact'], "[106, 193, 9]")
    assert result.exit_code == 0
    assert '"type":"Feature"' in result.output.strip()
Example #31
    def test_help(self, help_flag, call_mock, fake_kedro_cli,
                  fake_ipython_message):
        result = CliRunner().invoke(fake_kedro_cli.cli, ["ipython", help_flag])
        assert not result.exit_code, result.stdout
        fake_ipython_message.assert_not_called()
        call_mock.assert_called_once_with(["ipython", help_flag])
Example #32
def test_config():
    runner = CliRunner()
    result = runner.invoke(config)
    assert result.exit_code == 0
    config_dict = json.loads(result.output)
    assert len(config_dict) >= 5
Example #33
def test_viz():
    runner = CliRunner()
    result = runner.invoke(viz)
    assert result.exit_code == 0
    assert result.output.startswith('vi')
    assert result.output.endswith('z!\n')
Example #34
def test_publish_heroku_invalid_database(mock_which):
    mock_which.return_value = True
    runner = CliRunner()
    result = runner.invoke(cli.cli, ["publish", "heroku", "woop.db"])
    assert result.exit_code == 2
    assert 'Path "woop.db" does not exist' in result.output
Example #35
def cli_runner():
    os.environ["GITGUARDIAN_API_KEY"] = os.getenv(
        "TEST_GITGUARDIAN_API_KEY", "1234567890"
    )
    os.environ["GITGUARDIAN_API_URL"] = "https://api.gitguardian.com/"
    return CliRunner()
Example #36
    def test_show(self):
        runner = CliRunner()
        result = runner.invoke(config_group, ["show"])

        assert result.exit_code == 0
        assert result.output.strip() == "To be implemented!"
Example #37
def test_cli_version():
    runner = CliRunner()
    result = runner.invoke(cli, ['--version'])
    assert result.exit_code == 0
    assert len(result.output.split('.')) == 3
Example #38
    def test_happy_path(self, fake_kedro_cli, python_call_mock):
        result = CliRunner().invoke(fake_kedro_cli.cli,
                                    ["test", "--random-arg", "value"])
        assert not result.exit_code
        python_call_mock.assert_called_once_with("pytest",
                                                 ("--random-arg", "value"))
Example #39
    def test_init_in_existing_repo(self):
        runner = CliRunner()
        with runner.isolated_filesystem():
            repoDir = "./arepo"
            repo = createUnrelatedRepo(repoDir)
            os.chdir(repoDir)
            # override home to avoid interfering with other tests
            result = runner.invoke(
                cli,
                [
                    "--home",
                    "../unfurl_home",
                    "init",
                    "--existing",
                    "--mono",
                    "deploy_dir",
                ],
            )
            # uncomment this to see output:
            # print("result.output", result.exit_code, result.output)

            assert not result.exception, "\n".join(
                traceback.format_exception(*result.exc_info)
            )
            self.assertEqual(result.exit_code, 0, result)
            expectedCommittedFiles = {
                "unfurl.yaml",
                "ensemble-template.yaml",
                ".gitignore",
                ".gitattributes",
            }
            expectedFiles = expectedCommittedFiles | {"local", "ensemble"}
            self.assertEqual(set(os.listdir("deploy_dir")), expectedFiles)
            files = set(_path for (_path, _stage) in repo.index.entries)
            expectedCommittedFiles.add("ensemble/ensemble.yaml")
            expected = {"deploy_dir/" + f for f in expectedCommittedFiles}
            expected.add("README")  # the original file in the repo
            self.assertEqual(files, expected)
            # for n in expectedFiles:
            #     with open("deploy_dir/" + n) as f:
            #         print(n)
            #         print(f.read())

            with open("deploy_dir/ensemble/ensemble.yaml", "w") as f:
                f.write(manifestContent)

            result = runner.invoke(
                cli,
                [
                    "git",
                    "--dir",
                    "deploy_dir",
                    "commit",
                    "-m",
                    "update manifest",
                    "deploy_dir/ensemble/ensemble.yaml",
                ],
            )
            # uncomment this to see output:
            # print("commit result.output", result.exit_code, result.output)
            assert not result.exception, "\n".join(
                traceback.format_exception(*result.exc_info)
            )

            # "-vvv",
            args = ["deploy", "deploy_dir", "--jobexitcode", "degraded"]
            result = runner.invoke(cli, args)
            # print("result.output", result.exit_code, result.output)
            assert not result.exception, "\n".join(
                traceback.format_exception(*result.exc_info)
            )
            self.assertEqual(result.exit_code, 0, result)
Example #40
    def test_pythonpath_env_var(self, fake_kedro_cli, mocker, fake_repo_path):
        mocked_environ = mocker.patch("os.environ", {})
        CliRunner().invoke(fake_kedro_cli.cli, ["lint"])
        assert mocked_environ == {"PYTHONPATH": str(fake_repo_path / "src")}
Example #41
def test_agent_install_fails_non_valid_agent(cloud_api):
    runner = CliRunner()
    result = runner.invoke(agent, ["install", "fake_agent"])
    assert result.exit_code == 0
    assert "fake_agent is not a supported agent for `install`" in result.output
Example #42
    def test_home_template(self):
        runner = CliRunner()
        with runner.isolated_filesystem():
            # override home to avoid interfering with other tests
            result = runner.invoke(cli, ["--home", "./unfurl_home", "home", "--init"])
            # uncomment this to see output:
            # print("result.output", result.exit_code, result.output)
            assert not result.exception, "\n".join(
                traceback.format_exception(*result.exc_info)
            )

            assert not os.path.exists("./unfurl_home/.tool_versions")
            makeAsdfFixtures("test_asdf")
            os.environ["ASDF_DATA_DIR"] = os.path.abspath("test_asdf")
            args = [
                #  "-vvv",
                "deploy",
                "./unfurl_home",
                "--jobexitcode",
                "degraded",
            ]
            result = runner.invoke(cli, args)
            # print("result.output", result.exit_code, result.output)
            assert not result.exception, "\n".join(
                traceback.format_exception(*result.exc_info)
            )
            self.assertEqual(result.exit_code, 0, result)

            assert os.path.exists("unfurl_home/.tool-versions")
            assert LocalEnv("unfurl_home").getManifest()
            paths = os.environ["PATH"].split(os.pathsep)
            assert len(paths) >= len(installDirs)
            for dirs, path in zip(installDirs, paths):
                self.assertIn(os.sep.join(dirs), path)

            # test that projects are registered in the home
            # use this project because the repository it is in has a non-local origin set:
            project = os.path.join(
                os.path.dirname(__file__), "examples/testimport-manifest.yaml"
            )
            # set starttime to suppress job logging to file
            result = runner.invoke(
                cli, ["--home", "./unfurl_home", "plan", "--starttime=1", project]
            )
            # print("result.output", result.exit_code, result.output)
            assert not result.exception, "\n".join(
                traceback.format_exception(*result.exc_info)
            )
            self.assertEqual(result.exit_code, 0, result)

            # assert added to projects
            basedir = os.path.dirname(os.path.dirname(__file__))
            repo = GitRepo(Repo(basedir))
            gitUrl = repo.url
            # travis-ci does a shallow clone so it doesn't have the initial revision
            initial = repo.getInitialRevision()
            with open("./unfurl_home/unfurl.yaml") as f:
                contents = f.read()
                for line in [
                    "_examples:",
                    "url: " + gitUrl,
                    "initial: " + initial,
                    "file: tests/examples",
                ]:
                    self.assertIn(line, contents)

            # assert added to localRepositories
            with open("./unfurl_home/local/unfurl.yaml") as f:
                contents = f.read()
                for line in [
                    "url: " + normalizeGitUrl(gitUrl),
                    "initial: " + initial,
                ]:
                    self.assertIn(line, contents)
                self.assertNotIn("origin:", contents)

            externalProjectManifest = """
apiVersion: unfurl/v1alpha1
kind: Ensemble
context:
 external:
  test:
    manifest:
      file: testimport-manifest.yaml
      project: _examples
spec:
  service_template:
    topology_template:
      node_templates:
        testNode:
          type: tosca.nodes.Root
          properties:
            externalEnsemble:
              eval:
                external: test
"""
            with open("externalproject.yaml", "w") as f:
                f.write(externalProjectManifest)

            result = runner.invoke(
                cli, ["--home", "./unfurl_home", "plan", "externalproject.yaml"]
            )
            # print("result.output", result.exit_code, result.output)
            assert not result.exception, "\n".join(
                traceback.format_exception(*result.exc_info)
            )
            self.assertEqual(result.exit_code, 0, result)

            # assert that we loaded the external ensemble from test "_examples" project
            # and we're able to reference its outputs
            assert _latestJobs
            testNode = _latestJobs[-1].rootResource.findResource("testNode")
            assert testNode and testNode.attributes["externalEnsemble"]
            externalEnsemble = testNode.attributes["externalEnsemble"]
            assert "aOutput" in externalEnsemble.outputs.attributes
            # make sure we loaded it from the source (not a local checkout)
            assert externalEnsemble.baseDir.startswith(os.path.dirname(__file__))
Example #43
def test_tables_fts5(db_path):
    Database(db_path)["Gosh"].enable_fts(["c2"], fts_version="FTS5")
    result = CliRunner().invoke(cli.cli, ["tables", "--fts5", db_path])
    assert '[{"table": "Gosh_fts"}]' == result.output.strip()
Example #44
def test_help(cmd):
    args = cmd.split()
    result = CliRunner().invoke(cli, args + ["-h"])
    assert result.exit_code == 0
Example #45
def test_tables(db_path):
    result = CliRunner().invoke(cli.cli, ["tables", db_path])
    assert '[{"table": "Gosh"},\n {"table": "Gosh2"}]' == result.output.strip()
Example #46
def test_agent_start(
    name, import_path, extra_cmd, extra_kwargs, deprecated, monkeypatch
):
    if name == "ecs" and deprecated:
        pytest.skip("No deprecated version for ECS agent")

    command = ["start", name] if deprecated else [name, "start"]
    command.extend(
        (
            "--token TEST-TOKEN --api TEST-API --agent-config-id TEST-AGENT-CONFIG-ID "
            "--name TEST-NAME -l label1 -l label2 -e KEY1=VALUE1 -e KEY2=VALUE2 "
            "--max-polls 10 --agent-address 127.0.0.1:8080"
        ).split()
    )
    if deprecated:
        command.append("--verbose")
    else:
        command.extend(["--log-level", "debug"])
    if not isinstance(extra_cmd, str):
        extra_cmd = extra_cmd[0] if deprecated else " ".join(extra_cmd)
    command.extend(extra_cmd.split())

    if not isinstance(extra_kwargs, dict):
        extra_kwargs = (
            extra_kwargs[0]
            if deprecated
            else dict(**extra_kwargs[0], **extra_kwargs[1])
        )

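    # The keyword arguments the mocked agent class should be constructed with,
    # assembled from the CLI options passed above.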
    expected_kwargs = {
        "agent_config_id": "TEST-AGENT-CONFIG-ID",
        "name": "TEST-NAME",
        "labels": ["label1", "label2"],
        "env_vars": {"KEY1": "VALUE1", "KEY2": "VALUE2"},
        "max_polls": 10,
        "agent_address": "127.0.0.1:8080",
        "no_cloud_logs": False,
        **extra_kwargs,
    }

    agent_obj = MagicMock()

    def check_config(*args, **kwargs):
        assert prefect.config.cloud.agent.auth_token == "TEST-TOKEN"
        assert prefect.config.cloud.agent.level == "DEBUG"
        assert prefect.config.cloud.api == "TEST-API"
        return agent_obj

    module, cls_name = import_path.rsplit(".", 1)
    cls = getattr(import_module(module), cls_name)
    agent_cls = create_autospec(cls, side_effect=check_config)
    monkeypatch.setattr(import_path, agent_cls)

    result = CliRunner().invoke(agent, command)
    if deprecated:
        if name == "fargate":
            assert f"Warning: The Fargate agent is deprecated" in result.output
        else:
            assert (
                f"Warning: `prefect agent start {name}` is deprecated" in result.output
            )

    kwargs = agent_cls.call_args[1]
    for k, v in expected_kwargs.items():
        assert kwargs[k] == v
    assert agent_obj.start.called
Example #47
    @classmethod
    def setUpClass(cls):
        cls.runner = CliRunner()
        cls.runner.invoke(cli, ['login', "--user", "*****@*****.**", "--passwd", "Pippo123"])
Example #48
def test_vacuum(db_path):
    result = CliRunner().invoke(cli.cli, ["vacuum", db_path])
    assert 0 == result.exit_code
Example #49
    def test_help(self) -> None:
        """
        Help text is shown with `dcos-docker run --help`.
        """
        runner = CliRunner()
        result = runner.invoke(
            dcos_docker,
            ['run', '--help'],
            catch_exceptions=False,
        )
        assert result.exit_code == 0
        # yapf breaks multi-line noqa, see
        # https://github.com/google/yapf/issues/524.
        # yapf: disable
        expected_help = dedent(
            """\
            Usage: dcos-docker run [OPTIONS] NODE_ARGS...

              Run an arbitrary command on a node.

              This command sets up the environment so that ``pytest`` can be run.

              For example, run ``dcos-docker run --cluster-id 1231599 pytest -k
              test_tls.py``.

              Or, with sync: ``dcos-docker run --sync-dir . --cluster-id 1231599 pytest -k
              test_tls.py``.

              To use special characters such as single quotes in your command, wrap the
              whole command in double quotes.

            Options:
              -c, --cluster-id TEXT          The ID of the cluster to use.  [default:
                                             default]
              --dcos-login-uname TEXT        The username to set the ``DCOS_LOGIN_UNAME``
                                             environment variable to.
              --dcos-login-pw TEXT           The password to set the ``DCOS_LOGIN_PW``
                                             environment variable to.
              --sync-dir PATH                The path to a DC/OS checkout. Part of this
                                             checkout will be synced to all master nodes
                                             before the command is run.
              --no-test-env                  With this flag set, no environment variables
                                             are set and the command is run in the home
                                             directory.
              --node TEXT                    A reference to a particular node to run the
                                             command on. This can be one of: The node's IP
                                             address, the node's Docker container name, the
                                             node's Docker container ID, a reference in the
                                             format "<role>_<number>". These details be seen
                                             with ``dcos_docker inspect``.
              --env TEXT                     Set environment variables in the format
                                             "<KEY>=<VALUE>"
              --transport [docker-exec|ssh]  The communication transport to use. On macOS
                                             the SSH transport requires IP routing to be set
                                             up. See "dcos-docker setup-mac-network".It also
                                             requires the "ssh" command to be available.
                                             This can be provided by setting the
                                             `DCOS_DOCKER_TRANSPORT` environment variable.
                                             [default: docker-exec]
              --help                         Show this message and exit.
            """,# noqa: E501,E261
        )
        # yapf: enable
        assert result.output == expected_help
Example #50
def test_purge_config_cache():
    runner = CliRunner()

    result = runner.invoke(main, ['purge-config-cache'])
    assert result.exit_code == 0
Example #51
class TestConfigReader(object):
    @patch("sceptre.config.reader.ConfigReader._check_valid_project_path")
    def setup_method(self, test_method, mock_check_valid_project_path):
        self.runner = CliRunner()
        self.test_project_path = os.path.join(
            os.getcwd(), "tests", "fixtures"
        )
        self.context = SceptreContext(
            project_path=self.test_project_path,
            command_path="A"
        )

    def test_config_reader_correctly_initialised(self):
        config_reader = ConfigReader(self.context)
        assert config_reader.context == self.context

    def test_config_reader_with_invalid_path(self):
        with pytest.raises(InvalidSceptreDirectoryError):
            ConfigReader(SceptreContext("/path/does/not/exist", "example"))

    def create_project(self):
        """
        Creates a new random temporary directory with a config subdirectory
        """
        with self.runner.isolated_filesystem():
            project_path = os.path.abspath('./example')
        config_dir = os.path.join(project_path, "config")
        os.makedirs(config_dir)
        return (project_path, config_dir)

    def write_config(self, abs_path, config):
        """
        Writes a configuration dict to the specified path as YAML
        """
        if abs_path.endswith(".yaml"):
            dir_path = os.path.split(abs_path)[0]
        if not os.path.exists(dir_path):
            try:
                os.makedirs(dir_path)
            except OSError as exc:
                if exc.errno != errno.EEXIST:
                    raise

        with open(abs_path, 'w') as config_file:
            yaml.safe_dump(
                config, stream=config_file, default_flow_style=False
            )

    @pytest.mark.parametrize("filepaths,target", [
        (
            ["A/1.yaml"], "A/1.yaml"
        ),
        (
            ["A/1.yaml", "A/B/1.yaml"], "A/B/1.yaml"
        ),
        (
            ["A/1.yaml", "A/B/1.yaml", "A/B/C/1.yaml"], "A/B/C/1.yaml"
        )
    ])
    def test_read_reads_config_file(self, filepaths, target):
        project_path, config_dir = self.create_project()

        for rel_path in filepaths:
            config = {"filepath": rel_path}
            abs_path = os.path.join(config_dir, rel_path)
            self.write_config(abs_path, config)

        self.context.project_path = project_path
        config = ConfigReader(self.context).read(target)

        assert config == {
            "project_path": project_path,
            "stack_group_path": os.path.split(target)[0],
            "filepath": target
        }

    def test_read_reads_config_file_with_base_config(self):
        with self.runner.isolated_filesystem():
            project_path = os.path.abspath('./example')
            config_dir = os.path.join(project_path, "config")
            stack_group_dir = os.path.join(config_dir, "A")

            os.makedirs(stack_group_dir)

            config = {"config": "config"}
            with open(os.path.join(stack_group_dir, "stack.yaml"), 'w') as\
                    config_file:
                yaml.safe_dump(
                    config, stream=config_file, default_flow_style=False
                )

            base_config = {
                "base_config": "base_config"
            }
            self.context.project_path = project_path
            config = ConfigReader(self.context).read(
                "A/stack.yaml", base_config
            )

            assert config == {
                "project_path": project_path,
                "stack_group_path": "A",
                "config": "config",
                "base_config": "base_config"
            }

    def test_read_with_nonexistant_filepath(self):
        project_path, config_dir = self.create_project()
        self.context.project_path = project_path
        with pytest.raises(ConfigFileNotFoundError):
            ConfigReader(self.context).read("stack.yaml")

    def test_read_with_empty_config_file(self):
        config_reader = ConfigReader(self.context)
        config = config_reader.read(
            "account/stack-group/region/subnets.yaml"
        )
        assert config == {
            "project_path": self.test_project_path,
            "stack_group_path": "account/stack-group/region"
        }

    def test_read_with_templated_config_file(self):
        self.context.user_variables = {"variable_key": "user_variable_value"}
        config_reader = ConfigReader(self.context)

        config_reader.templating_vars["stack_group_config"] = {
            "region": "region_region",
            "project_code": "account_project_code",
            "required_version": "'>1.0'",
            "template_bucket_name": "stack_group_template_bucket_name"
        }
        os.environ["TEST_ENV_VAR"] = "environment_variable_value"
        config = config_reader.read(
            "account/stack-group/region/security_groups.yaml"
        )

        assert config == {
            'project_path': self.context.project_path,
            "stack_group_path": "account/stack-group/region",
            "parameters": {
                "param1": "user_variable_value",
                "param2": "environment_variable_value",
                "param3": "region_region",
                "param4": "account_project_code",
                "param5": ">1.0",
                "param6": "stack_group_template_bucket_name"
            }
        }

    def test_aborts_on_incompatible_version_requirement(self):
        config = {
            'required_version': '<0'
        }
        with pytest.raises(VersionIncompatibleError):
            ConfigReader(self.context)._check_version(config)

    @freeze_time("2012-01-01")
    @pytest.mark.parametrize("stack_name,config,expected", [
        (
            "name",
            {
                "template_bucket_name": "bucket-name",
                "template_key_prefix": "prefix",
                "region": "eu-west-1"
            },
            {
                "bucket_name": "bucket-name",
                "bucket_key": "prefix/name/2012-01-01-00-00-00-000000Z.json",
                "bucket_region": "eu-west-1",
            }
        ),
        (
            "name",
            {
                "template_bucket_name": "bucket-name",
                "region": "eu-west-1"
            },
            {
                "bucket_name": "bucket-name",
                "bucket_key": "name/2012-01-01-00-00-00-000000Z.json",
                "bucket_region": "eu-west-1",
            }
        ),
        (
            "name",
            {
                "template_bucket_name": "bucket-name",
            },
            {
                "bucket_name": "bucket-name",
                "bucket_key": "name/2012-01-01-00-00-00-000000Z.json",
                "bucket_region": None,
            }
        ),
        (
            "name", {}, None
        )
    ]
    )
    def test_collect_s3_details(self, stack_name, config, expected):
        details = ConfigReader._collect_s3_details(stack_name, config)
        assert details == expected

    @patch("sceptre.config.reader.ConfigReader._collect_s3_details")
    @patch("sceptre.config.reader.Stack")
    def test_construct_stacks_constructs_stack(
        self, mock_Stack, mock_collect_s3_details
    ):
        mock_Stack.return_value = sentinel.stack
        sentinel.stack.dependencies = []

        mock_collect_s3_details.return_value = sentinel.s3_details
        self.context.project_path = os.path.abspath("tests/fixtures-vpc")
        self.context.command_path = "account/stack-group/region/vpc.yaml"
        stacks = ConfigReader(self.context).construct_stacks()
        mock_Stack.assert_any_call(
            name="account/stack-group/region/vpc",
            project_code="account_project_code",
            template_path=os.path.join(
                self.context.project_path, "templates/path/to/template"
            ),
            region="region_region",
            profile="account_profile",
            parameters={"param1": "val1"},
            sceptre_user_data={},
            hooks={},
            s3_details=sentinel.s3_details,
            dependencies=["child/level", "top/level"],
            iam_role=None,
            role_arn=None,
            protected=False,
            tags={},
            external_name=None,
            notifications=None,
            on_failure=None,
            stack_timeout=0,
            required_version='>1.0',
            template_bucket_name='stack_group_template_bucket_name',
            template_key_prefix=None,
            stack_group_config={
                "custom_key": "custom_value"
            }
        )

        assert stacks == ({sentinel.stack}, {sentinel.stack})

    @pytest.mark.parametrize("filepaths,expected_stacks", [
        (["A/1.yaml"], {"A/1"}),
        (["A/1.yaml", "A/2.yaml", "A/3.yaml"], {"A/3", "A/2", "A/1"}),
        (["A/1.yaml", "A/A/1.yaml"], {"A/1", "A/A/1"}),
        (["A/1.yaml", "A/A/1.yaml", "A/A/2.yaml"], {"A/1", "A/A/1", "A/A/2"}),
        (["A/A/1.yaml", "A/B/1.yaml"], {"A/A/1", "A/B/1"})
    ])
    def test_construct_stacks_with_valid_config(
        self, filepaths, expected_stacks
    ):
        project_path, config_dir = self.create_project()

        for rel_path in filepaths:

            config = {
                "region": "region",
                "project_code": "project_code",
                "template_path": rel_path
            }

            abs_path = os.path.join(config_dir, rel_path)
            self.write_config(abs_path, config)

        self.context.project_path = project_path
        config_reader = ConfigReader(self.context)
        all_stacks, command_stacks = config_reader.construct_stacks()
        assert {str(stack) for stack in all_stacks} == expected_stacks

    @pytest.mark.parametrize("filepaths, del_key", [
        (["A/1.yaml"], "project_code"),
        (["A/1.yaml"], "region"),
        (["A/1.yaml"], "template_path"),
    ])
    def test_missing_attr(
        self, filepaths, del_key
    ):
        project_path, config_dir = self.create_project()

        for rel_path in filepaths:

            config = {
                "project_code": "project_code",
                "region": "region",
                "template_path": rel_path
            }
            # Delete the mandatory key to be tested.
            del config[del_key]

            abs_path = os.path.join(config_dir, rel_path)
            self.write_config(abs_path, config)

        self.context.project_path = project_path
        try:
            config_reader = ConfigReader(self.context)
            all_stacks, command_stacks = config_reader.construct_stacks()
        except InvalidConfigFileError as e:
            # Test that the missing key is reported.
            assert del_key in str(e)
        except Exception:
            raise
        else:
            assert False

    @pytest.mark.parametrize("filepaths, dependency", [
        (["A/1.yaml", "B/1.yaml", "B/2.yaml"], "A/1.yaml"),
        (["A/1.yaml", "B/1.yaml", "B/2.yaml"], "B/1.yaml"),
    ])
    def test_existing_dependency(
        self, filepaths, dependency
    ):
        project_path, config_dir = self.create_project()

        for rel_path in filepaths:
            # Set up config with reference to an existing stack
            config = {
                "project_code": "project_code",
                "region": "region",
                "template_path": rel_path,
                "dependencies": [dependency]
            }

            abs_path = os.path.join(config_dir, rel_path)
            self.write_config(abs_path, config)

        self.context.project_path = project_path
        try:
            config_reader = ConfigReader(self.context)
            all_stacks, command_stacks = config_reader.construct_stacks()
        except Exception:
            raise
        else:
            assert True

    @pytest.mark.parametrize("filepaths, dependency", [
        (["A/1.yaml", "B/1.yaml", "B/2.yaml"], "A/2.yaml"),
        (["A/1.yaml", "B/1.yaml", "B/2.yaml"], "1.yaml"),
    ])
    def test_missing_dependency(
        self, filepaths, dependency
    ):
        project_path, config_dir = self.create_project()

        for rel_path in filepaths:
            # Set up config with reference to non-existing stack
            config = {
                "project_code": "project_code",
                "region": "region",
                "template_path": rel_path,
                "dependencies": [dependency]
            }

            abs_path = os.path.join(config_dir, rel_path)
            self.write_config(abs_path, config)

        self.context.project_path = project_path
        try:
            config_reader = ConfigReader(self.context)
            all_stacks, command_stacks = config_reader.construct_stacks()
        except DependencyDoesNotExistError as e:
            # Test that the missing dependency is reported.
            assert dependency in str(e)
        except Exception:
            raise
        else:
            assert False

    def test_resolve_node_tag(self):
        mock_loader = MagicMock(yaml.Loader)
        mock_loader.resolve.return_value = "new_tag"

        mock_node = MagicMock(yaml.Node)
        mock_node.tag = "old_tag"
        mock_node.value = "String"

        config_reader = ConfigReader(self.context)
        new_node = config_reader.resolve_node_tag(mock_loader, mock_node)

        assert new_node.tag == 'new_tag'
Example #52
    def setUp(self):
        super().setUp()
        self.runner = CliRunner()
Example #53
    def test_broken_symlink(self) -> None:
        with cache_dir() as workspace:
            symlink = workspace / "broken_link.py"
            symlink.symlink_to("nonexistent.py")
            result = CliRunner().invoke(black.main, [str(workspace.resolve())])
            self.assertEqual(result.exit_code, 0)
Example #54
    def test_help(self) -> None:
        """
        Help text is shown with `dcos-docker create --help`.
        """
        runner = CliRunner()
        result = runner.invoke(
            dcos_docker,
            ['create', '--help'],
            catch_exceptions=False,
        )
        assert result.exit_code == 0
        # yapf breaks multi-line noqa, see
        # https://github.com/google/yapf/issues/524.
        # yapf: disable
        expected_help = dedent(
            """\
            Usage: dcos-docker create [OPTIONS] ARTIFACT

              Create a DC/OS cluster.

                  DC/OS Enterprise

                              DC/OS Enterprise clusters require different configuration variables to DC/OS OSS.
                              For example, enterprise clusters require the following configuration parameters:

                      ``superuser_username``, ``superuser_password_hash``,
                      ``fault_domain_enabled``, ``license_key_contents``

                              These can all be set in ``--extra-config``.
                              However, some defaults are provided for all but the license key.

                              The default superuser username is ``admin``.
                              The default superuser password is ``admin``.
                              The default ``fault_domain_enabled`` is ``false``.

                              ``license_key_contents`` must be set for DC/OS Enterprise 1.11 and above.
                              This is set to one of the following, in order:

                              * The ``license_key_contents`` set in ``--extra-config``.
                              * The contents of the path given with ``--license-key``.
                              * The contents of the path set in the ``DCOS_LICENSE_KEY_PATH`` environment variable.

                              If none of these are set, ``license_key_contents`` is not given.

            Options:
              --docker-version [1.11.2|1.13.1|17.12.1-ce]
                                              The Docker version to install on the nodes.
                                              [default: 1.13.1]
              --linux-distribution [centos-7|coreos|ubuntu-16.04]
                                              The Linux distribution to use on the nodes.
                                              [default: centos-7]
              --docker-storage-driver [aufs|overlay|overlay2]
                                              The storage driver to use for Docker in
                                              Docker. By default this uses the host's
                                              driver.
              --masters INTEGER               The number of master nodes.  [default: 1]
              --agents INTEGER                The number of agent nodes.  [default: 1]
              --public-agents INTEGER         The number of public agent nodes.  [default:
                                              1]
              --extra-config PATH             The path to a file including DC/OS
                                              configuration YAML. The contents of this file
                                              will be added to add to a default
                                              configuration.
              --security-mode [disabled|permissive|strict]
                                              The security mode to use for a DC/OS
                                              Enterprise cluster. This overrides any
                                              security mode set in ``--extra-config``.
              -c, --cluster-id TEXT           A unique identifier for the cluster. Use the
                                              value "default" to use this cluster for other
                                              commands without specifying --cluster-id.
              --license-key PATH              This is ignored if using open source DC/OS. If
                                              using DC/OS Enterprise, this defaults to the
                                              value of the `DCOS_LICENSE_KEY_PATH`
                                              environment variable.
              --genconf-dir PATH              Path to a directory that contains additional
                                              files for DC/OS installer. All files from this
                                              directory will be copied to the `genconf`
                                              directory before running DC/OS installer.
              --copy-to-master TEXT           Files to copy to master nodes before
                                              installing DC/OS. This option can be given
                                              multiple times. Each option should be in the
                                              format /absolute/local/path:/remote/path.
              --workspace-dir PATH            Creating a cluster can use approximately 2 GB
                                              of temporary storage. Set this option to use a
                                              custom "workspace" for this temporary storage.
                                              See https://docs.python.org/3/library/tempfile
                                              .html#tempfile.gettempdir for details on the
                                              temporary directory location if this option is
                                              not set.
              --custom-volume TEXT            Bind mount a volume on all cluster node
                                              containers. See https://docs.docker.com/engine
                                              /reference/run/#volume-shared-filesystems for
                                              the syntax to use.
              --custom-master-volume TEXT     Bind mount a volume on all cluster master node
                                              containers. See https://docs.docker.com/engine
                                              /reference/run/#volume-shared-filesystems for
                                              the syntax to use.
              --custom-agent-volume TEXT      Bind mount a volume on all cluster agent node
                                              containers. See https://docs.docker.com/engine
                                              /reference/run/#volume-shared-filesystems for
                                              the syntax to use.
              --custom-public-agent-volume TEXT
                                              Bind mount a volume on all cluster public
                                              agent node containers. See https://docs.docker
                                              .com/engine/reference/run/#volume-shared-
                                              filesystems for the syntax to use.
              --variant [auto|oss|enterprise]
                                              Choose the DC/OS variant. If the variant does
                                              not match the variant of the given artifact,
                                              an error will occur. Using "auto" finds the
                                              variant from the artifact. Finding the variant
                                              from the artifact takes some time and so using
                                              another option is a performance optimization.
              --wait-for-dcos                 Wait for DC/OS after creating the cluster.
                                              This is equivalent to using "dcos-docker wait"
                                              after this command. "dcos-docker wait" has
                                              various options available and so may be more
                                              appropriate for your use case. If the chosen
                                              transport is "docker-exec", this will skip
                                              HTTP checks and so the cluster may not be
                                              fully ready.
              --network TEXT                  The Docker network containers will be
                                              connected to.It may not be possible to SSH to
                                              containers on a custom network on macOS.
              --transport [docker-exec|ssh]   The communication transport to use. On macOS
                                              the SSH transport requires IP routing to be
                                              set up. See "dcos-docker setup-mac-network".It
                                              also requires the "ssh" command to be
                                              available. This can be provided by setting the
                                              `DCOS_DOCKER_TRANSPORT` environment variable.
                                              [default: docker-exec]
              --help                          Show this message and exit.
            """,# noqa: E501,E261
        )
        # yapf: enable
        assert result.output == expected_help
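For context, the `result` compared above is normally produced by running the command's `--help` through `CliRunner`. A minimal sketch of that setup, assuming the click group is importable as `dcos_docker` (the import path and invocation are not shown in this excerpt):

from click.testing import CliRunner

# Hypothetical import path; the real location of the click group is not
# shown in this excerpt.
from cli import dcos_docker


def test_create_help() -> None:
    runner = CliRunner()
    # ``--help`` makes click print the help text and exit with code 0.
    result = runner.invoke(dcos_docker, ['create', '--help'])
    assert result.exit_code == 0
    # The full test above compares ``result.output`` against the complete
    # expected help string; this sketch only checks the usage line.
    assert result.output.startswith('Usage: dcos-docker create')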
Example #55
    def __init__(self, methodName='runTest'):
        # Forward the test method name to TestCase so the requested test runs.
        super(TestCC, self).__init__(methodName)
        self.runner = CliRunner()
Example #56
class VCRIntegrationTest(IntegrationTestCase):
    @property
    def region(self):
        return os.environ["AWS_DEFAULT_REGION"]

    @property
    def domain(self):
        return os.environ["SWF_DOMAIN"]

    @property
    def workflow_id(self):
        return WORKFLOW_ID

    @property
    def conn(self):
        if not hasattr(self, "_conn"):
            self._conn = boto.swf.connect_to_region(self.region)
        return self._conn

    def get_events(self, run_id):
        response = self.conn.get_workflow_execution_history(
            self.domain,
            run_id,
            self.workflow_id,
        )
        events = response['events']
        next_page = response.get('nextPageToken')
        while next_page is not None:
            response = self.conn.get_workflow_execution_history(
                self.domain,
                run_id,
                self.workflow_id,
                next_page_token=next_page,
            )

            events.extend(response['events'])
            next_page = response.get('nextPageToken')
        return events

    def invoke(self, command, arguments):
        # type: (str, Union[str, List[str]]) -> Result
        if not hasattr(self, "runner"):
            self.runner = CliRunner()
        if isinstance(arguments, str):
            arguments = arguments.split(" ")
        print('simpleflow {} {}'.format(command, ' '.join(arguments)))
        return self.runner.invoke(command, arguments, catch_exceptions=False)

    def run_standalone(self, workflow_name, *args, **kwargs):
        input = json_dumps(dict(args=args, kwargs=kwargs))
        result = self.invoke(
            simpleflow.command.cli,
            [
                "standalone",
                "--workflow-id",
                str(self.workflow_id),
                "--input",
                input,
                "--nb-deciders",
                "2",
                "--nb-workers",
                "2",
                workflow_name,
            ],
        )
        expect(result.exit_code).to.equal(0)
        lines = result.output.split("\n")
        start_line = [
            line for line in lines if line.startswith(self.workflow_id)
        ][0]
        _, run_id = start_line.split(" ", 1)

        events = self.get_events(run_id)
        return events
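A subclass typically drives a workflow end to end through `run_standalone` and then inspects the returned SWF history. A minimal sketch, where the workflow name is hypothetical (any workflow registered for the test SWF domain would do):

class StandaloneWorkflowTest(VCRIntegrationTest):
    def test_standalone_workflow_completes(self):
        # Hypothetical workflow module path; not part of the original source.
        events = self.run_standalone("tests.integration.MyWorkflow")
        # A successful run ends by closing the workflow execution.
        assert events[-1]["eventType"] == "WorkflowExecutionCompleted"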
Example #57
def test_command_line_interface():
    """Test the CLI."""
    runner = CliRunner()
    help_result = runner.invoke(cli.main, ["--help"])
    assert help_result.exit_code == 0
    assert "--help" in help_result.output
Example #58
    def test_no_files(self) -> None:
        with cache_dir():
            # Without an argument, black exits with error code 0.
            result = CliRunner().invoke(black.main, [])
            self.assertEqual(result.exit_code, 0)
Example #59
import sys

import click
from click.testing import CliRunner

from . import core


def is_debugging():
    return not (sys.gettrace() is None)


@click.command()
@click.argument('path', type=click.Path(exists=True))
@click.argument('transmission_url')
@click.argument('torrentleech_username', envvar='TORRENTLEECH_USERNAME')
@click.argument('torrentleech_password', envvar='TORRENTLEECH_PASSWORD')
@click.argument('torrentleech_rss_key', envvar='TORRENTLEECH_RSS_KEY')
def add_torrents_from_folder(path, transmission_url, torrentleech_username, torrentleech_password, torrentleech_rss_key):
    """Console script for media_server_utils."""
    core.add_torrents_from_folder(path, transmission_url, torrentleech_username, torrentleech_password, torrentleech_rss_key)
    return 0


if __name__ == "__main__":
    if is_debugging():
        runner = CliRunner()
        runner.invoke(add_torrents_from_folder)
    else:
        add_torrents_from_folder()
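This script can be exercised with the same CliRunner pattern used throughout these examples. A minimal sketch, assuming the module above is importable as `media_server_utils.cli` and patching out the real `core` call (both names are assumptions, not confirmed by the source):

import os
from unittest import mock

from click.testing import CliRunner

# Hypothetical import path; adjust to wherever the command module lives.
from media_server_utils import cli


def test_add_torrents_from_folder_invokes_core():
    runner = CliRunner()
    with runner.isolated_filesystem():
        # click.Path(exists=True) requires the path to exist on disk.
        os.mkdir('watch')
        with mock.patch.object(cli.core, 'add_torrents_from_folder') as add:
            result = runner.invoke(
                cli.add_torrents_from_folder,
                ['watch', 'http://localhost:9091', 'user', 'pass', 'rss-key'],
            )
    assert result.exit_code == 0
    add.assert_called_once_with(
        'watch', 'http://localhost:9091', 'user', 'pass', 'rss-key'
    )

Passing the credentials as positional arguments means the TORRENTLEECH_* environment variables are never consulted, which keeps the test hermetic.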
Example #60
class BaseCliTestCase(SimpleTestCase):
    mock_requests = True  # Flag if the testcase should mock out requests library.
    isolate = False  # Flag if the test should run in isolated environment.

    def _pre_setup(self):
        # Use Mocket to prevent any real network access from tests
        Mocket.enable()

        # Mock at the level of requests library.
        # Connectivity tests can use lower level mocks at socket level instead.
        if self.mock_requests:
            responses.start()

        # Start with default config
        self._config = ConfigObj({
            'username': '******',
            'password': '******',
        })
        self.config = MagicMock(spec_set=self._config)
        self.config.__getitem__.side_effect = self._config.__getitem__
        self.config.__setitem__.side_effect = self._config.__setitem__
        self.config.get.side_effect = self._config.get
        patch('dpm.main.ConfigObj', lambda *a: self.config).start()

        self.runner = CliRunner()

    def _post_teardown(self):
        """ Disable all mocks """
        if self.mock_requests:
            responses.reset()
            responses.stop()
        # TODO: Mocket.disable() sometimes makes tests hang.
        # Mocket.disable()
        patch.stopall()

    def invoke(self, cli, args=None, **kwargs):
        """
        Invoke click command. If self.isolate is True, then delegate to
        self.runner.invoke(), which will create isolated environment.
        Otherwise invoke command directly, which should allow to use debugger.
        The issue is that debuggers are confused by click wrappers in
        place of sys.stdin and sys.stdout
        """
        kwargs.setdefault('catch_exceptions', False)
        with self.runner.isolated_filesystem():
            if self.isolate:
                result = self.runner.invoke(cli, args, **kwargs)
            else:
                if six.PY2:
                    stdout = stderr = bytes_output = StringIO()
                else:
                    bytes_output = io.BytesIO()
                    stdout = stderr = io.TextIOWrapper(
                        bytes_output, encoding='utf-8')

                patch('click.utils._default_text_stdout', lambda: stdout).start()
                patch('click.utils._default_text_stderr', lambda: stderr).start()
                exit_code = 0
                exception = None
                exc_info = None
                try:
                    cli.main(args=args, prog_name=cli.name or 'root')
                except SystemExit as e:
                    exit_code = e.code
                result = Result(runner=self.runner,
                                output_bytes=bytes_output.getvalue(),
                                exit_code=exit_code,
                                exception=exception,
                                exc_info=exc_info)
        return result
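A concrete test case built on this base class might look like the following. This is only a sketch: the `configure` command and its `--username` option are hypothetical, while the `dpm.main` module is implied by the ConfigObj patch above.

from dpm import main


class ConfigureCommandTests(BaseCliTestCase):
    def test_configure_updates_username(self):
        # invoke() runs inside an isolated filesystem; the command name and
        # option here are hypothetical and only illustrate the pattern.
        result = self.invoke(main.configure, ['--username', 'alice'])
        self.assertEqual(result.exit_code, 0)
        # The patched ConfigObj mock records config writes made by the command.
        self.config.__setitem__.assert_any_call('username', 'alice')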