Example #1
def test_seq_rs():
    runner = CliRunner()
    result = runner.invoke(bounds.bounds, [], feature_seq_pp_rs)
    assert result.exit_code == 0
    assert result.output.count('[') == result.output.count(']') == 2
    assert len(re.findall(r'\d*\.\d*', result.output)) == 8
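For context, every example in this collection follows the same click.testing pattern: build a CliRunner, call invoke() with a command object and a list of arguments (stdin can be passed through the input parameter, which is what Example #1 does with its feature sequence), then assert on result.exit_code and result.output. A minimal, self-contained sketch using a hypothetical hello command:

import click
from click.testing import CliRunner


@click.command()
@click.option('--name', default='world')
def hello(name):
    click.echo('hello {}'.format(name))


def test_hello():
    runner = CliRunner()
    # Arguments go in a list; the command runs in-process and the result
    # object captures both the output and the exit code.
    result = runner.invoke(hello, ['--name', 'tester'])
    assert result.exit_code == 0
    assert result.output == 'hello tester\n'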
 def test_load_with_app_id(self, load, deployment):
     CliRunner().invoke(load,
                        ["--app", "some app id", "--replay", "--verbose"])
     deployment.assert_called_once_with("some app id", mock.ANY, True,
                                        {"replay": True})
    def test_bot_with_debug_url(self, bot_command, mock_bot):
        CliRunner().invoke(bot_command,
                           ["--app", "some app id", "--debug", "some url"])

        assert mock_bot.run_experiment.called
Example #4
def test_input_valid(manage_deployment_backend):
    run_options_test(
        CliRunner(), manage_deployment, ALL_VALID_OPTIONS, manage_deployment_backend.run
    )
 def test_sandbox_with_no_app_id(self, sandbox, deploy_in_mode):
     CliRunner().invoke(sandbox, ["--verbose"])
     deploy_in_mode.assert_called_once_with("sandbox",
                                            app=None,
                                            verbose=True,
                                            log=mock.ANY)
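The two snippets above, like the deploy and bot tests later in this collection, share a second pattern: patch the backend the command delegates to, invoke the command, then assert only on how that backend was called. A minimal sketch with hypothetical names (Backend and deploy are stand-ins, not part of any real package):

import click
from click.testing import CliRunner
from unittest import mock


class Backend:
    # Hypothetical stand-in for the deployment backend patched in the
    # examples above.
    @staticmethod
    def run(app=None, verbose=False):
        raise RuntimeError('should be mocked in tests')


@click.command()
@click.option('--app', default=None)
@click.option('--verbose', is_flag=True)
def deploy(app, verbose):
    Backend.run(app=app, verbose=verbose)


def test_deploy_calls_backend():
    # Only the delegation is under test, not the backend itself.
    with mock.patch.object(Backend, 'run') as run:
        CliRunner().invoke(deploy, ['--verbose'])
        run.assert_called_once_with(app=None, verbose=True)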
Example #6
def test_apprise_cli_nux_env(tmpdir):
    """
    CLI: Nux Environment

    """
    class GoodNotification(NotifyBase):
        def __init__(self, *args, **kwargs):
            super(GoodNotification, self).__init__(*args, **kwargs)

        def notify(self, **kwargs):
            # Pretend everything is okay
            return True

        def url(self, *args, **kwargs):
            # Support url()
            return 'good://'

    class BadNotification(NotifyBase):
        def __init__(self, *args, **kwargs):
            super(BadNotification, self).__init__(*args, **kwargs)

        def notify(self, **kwargs):
            # Force a notification failure
            return False

        def url(self, *args, **kwargs):
            # Support url()
            return 'bad://'

    # Set up our notification types
    SCHEMA_MAP['good'] = GoodNotification
    SCHEMA_MAP['bad'] = BadNotification

    runner = CliRunner()
    result = runner.invoke(cli.main)
    # no servers specified; we return 1 (non-zero)
    assert result.exit_code == 1

    result = runner.invoke(cli.main, ['-v'])
    assert result.exit_code == 1

    result = runner.invoke(cli.main, ['-vv'])
    assert result.exit_code == 1

    result = runner.invoke(cli.main, ['-vvv'])
    assert result.exit_code == 1

    result = runner.invoke(cli.main, ['-vvvv'])
    assert result.exit_code == 1

    # Display version information and exit
    result = runner.invoke(cli.main, ['-V'])
    assert result.exit_code == 0

    result = runner.invoke(cli.main, [
        '-t',
        'test title',
        '-b',
        'test body',
        'good://localhost',
    ])
    assert result.exit_code == 0

    with mock.patch('requests.post') as mock_post:
        # Prepare Mock
        mock_post.return_value = requests.Request()
        mock_post.return_value.status_code = requests.codes.ok

        result = runner.invoke(
            cli.main,
            [
                '-t',
                'test title',
                '-b',
                'test body\\nsNewLine',
                # Test using interpret escapes
                '-e',
                # Use our JSON query
                'json://localhost',
            ])
        assert result.exit_code == 0

        # Test our call count
        assert mock_post.call_count == 1

        # Our string is now escaped correctly
        assert json.loads(mock_post.call_args_list[0][1]['data'])\
            .get('message', '') == 'test body\nsNewLine'

        # Reset
        mock_post.reset_mock()

        result = runner.invoke(
            cli.main,
            [
                '-t',
                'test title',
                '-b',
                'test body\\nsNewLine',
                # No -e switch at all (so we don't escape the above)
                # Use our JSON query
                'json://localhost',
            ])
        assert result.exit_code == 0

        # Test our call count
        assert mock_post.call_count == 1

        # Without the -e switch, the string is left un-escaped
        assert json.loads(mock_post.call_args_list[0][1]['data'])\
            .get('message', '') == 'test body\\nsNewLine'

    # Run in synchronous mode
    result = runner.invoke(cli.main, [
        '-t',
        'test title',
        '-b',
        'test body',
        'good://localhost',
        '--disable-async',
    ])
    assert result.exit_code == 0

    # Test Debug Mode (--debug)
    result = runner.invoke(cli.main, [
        '-t',
        'test title',
        '-b',
        'test body',
        'good://localhost',
        '--debug',
    ])
    assert result.exit_code == 0

    # Test Debug Mode (-D)
    result = runner.invoke(cli.main, [
        '-t',
        'test title',
        '-b',
        'test body',
        'good://localhost',
        '-D',
    ])
    assert result.exit_code == 0

    result = runner.invoke(cli.main, [
        '-t',
        'test title',
        'good://localhost',
    ],
                           input='test stdin body\n')
    assert result.exit_code == 0

    # Run in synchronous mode
    result = runner.invoke(cli.main, [
        '-t',
        'test title',
        'good://localhost',
        '--disable-async',
    ],
                           input='test stdin body\n')
    assert result.exit_code == 0

    result = runner.invoke(cli.main, [
        '-t',
        'test title',
        '-b',
        'test body',
        'bad://localhost',
    ])
    assert result.exit_code == 1

    # Run in synchronous mode
    result = runner.invoke(cli.main, [
        '-t',
        'test title',
        '-b',
        'test body',
        'bad://localhost',
        '-Da',
    ])
    assert result.exit_code == 1

    # Testing with the --dry-run flag reveals a successful response since we
    # don't actually execute the bad:// notification; we only display it
    result = runner.invoke(cli.main, [
        '-t',
        'test title',
        '-b',
        'test body',
        'bad://localhost',
        '--dry-run',
    ])
    assert result.exit_code == 0

    # Write a simple text based configuration file
    t = tmpdir.mkdir("apprise-obj").join("apprise")
    buf = """
    # Include ourselves
    include {}

    taga,tagb=good://localhost
    tagc=good://nuxref.com
    """.format(str(t))
    t.write(buf)

    # This will read our configuration and not send any notices at all
    # because we assigned tags to all of our urls and didn't identify
    # a specific match below.

    # The 'include' reference in the configuration file would have included
    # the file a second time (since the recursion default is 1).
    result = runner.invoke(cli.main, [
        '-b',
        'test config',
        '--config',
        str(t),
    ])
    # Even when recursion takes place, tags are all honored,
    # so 3 is returned because nothing was notified
    assert result.exit_code == 3

    # This will send out 1 notification because our tag matches
    # one of the entries above
    # translation: has taga
    result = runner.invoke(cli.main, [
        '-b',
        'has taga',
        '--config',
        str(t),
        '--tag',
        'taga',
    ])
    assert result.exit_code == 0

    # Test recursion
    result = runner.invoke(
        cli.main,
        [
            '-t',
            'test title',
            '-b',
            'test body',
            '--config',
            str(t),
            '--tag',
            'tagc',
            # Invalid entry specified for recursion
            '-R',
            'invalid',
        ])
    assert result.exit_code == 2

    result = runner.invoke(
        cli.main,
        [
            '-t',
            'test title',
            '-b',
            'test body',
            '--config',
            str(t),
            '--tag',
            'tagc',
            # Missing value for the recursion depth
            '--recursion-depth',
        ])
    assert result.exit_code == 2

    result = runner.invoke(
        cli.main,
        [
            '-t',
            'test title',
            '-b',
            'test body',
            '--config',
            str(t),
            '--tag',
            'tagc',
            # Disable recursion (thus inclusion will be ignored)
            '-R',
            '0',
        ])
    assert result.exit_code == 0

    # Test recursion
    result = runner.invoke(
        cli.main,
        [
            '-t',
            'test title',
            '-b',
            'test body',
            '--config',
            str(t),
            '--tag',
            'tagc',
            # Recurse up to 5 times
            '--recursion-depth',
            '5',
        ])
    assert result.exit_code == 0

    # This will send out 2 notifications because specifying multiple tag
    # entries 'or's them together:
    # translation: has taga or tagc or tagd
    result = runner.invoke(cli.main, [
        '-b',
        'has taga OR tagc OR tagd',
        '--config',
        str(t),
        '--tag',
        'taga',
        '--tag',
        'tagc',
        '--tag',
        'tagd',
    ])
    assert result.exit_code == 0

    # Write a simple text based configuration file
    t = tmpdir.mkdir("apprise-obj2").join("apprise-test2")
    buf = """
    good://localhost/1
    good://localhost/2
    good://localhost/3
    good://localhost/4
    good://localhost/5
    myTag=good://localhost/6
    """
    t.write(buf)

    # This will read our configuration and send a notification to
    # the first 5 entries in the list, but not the one that has
    # the tag associated with it
    result = runner.invoke(cli.main, [
        '-b',
        'test config',
        '--config',
        str(t),
    ])
    assert result.exit_code == 0

    # Test our notification type switch (it defaults to info) so we want to
    # try it as a different value. Should return without a problem
    result = runner.invoke(cli.main, [
        '-b',
        '# test config',
        '--config',
        str(t),
        '-n',
        'success',
    ])
    assert result.exit_code == 0

    # Test our notification type switch when set to something unsupported
    result = runner.invoke(cli.main, [
        '-b',
        'test config',
        '--config',
        str(t),
        '--notification-type',
        'invalid',
    ])
    # An error code of 2 is returned if invalid input is specified on the
    # command line
    assert result.exit_code == 2

    # The notification type switch is case-insensitive
    result = runner.invoke(cli.main, [
        '-b',
        'test config',
        '--config',
        str(t),
        '--notification-type',
        'WARNING',
    ])
    assert result.exit_code == 0

    # Test our formatting switch (it defaults to text) so we want to try it as
    # a different value. Should return without a problem
    result = runner.invoke(cli.main, [
        '-b',
        '# test config',
        '--config',
        str(t),
        '-i',
        'markdown',
    ])
    assert result.exit_code == 0

    # Test our formatting switch when set to something unsupported
    result = runner.invoke(cli.main, [
        '-b',
        'test config',
        '--config',
        str(t),
        '--input-format',
        'invalid',
    ])
    # An error code of 2 is returned if invalid input is specified on the
    # command line
    assert result.exit_code == 2

    # The formatting switch is not case sensitive
    result = runner.invoke(cli.main, [
        '-b',
        '# test config',
        '--config',
        str(t),
        '--input-format',
        'HTML',
    ])
    assert result.exit_code == 0

    # As a way of ensuring we match the first 5 entries, we can run a
    # --dry-run against the same result set above and verify the output
    result = runner.invoke(cli.main, [
        '-b',
        'test config',
        '--config',
        str(t),
        '--dry-run',
    ])
    assert result.exit_code == 0
    lines = re.split(r'[\r\n]', result.output.strip())
    # 5 lines of all good:// entries matched
    assert len(lines) == 5
    # Verify we match against the remaining good:// entries
    for i in range(0, 5):
        assert lines[i].endswith('good://')

    # This will fail because nothing matches mytag. It's case sensitive
    # and we would only actually match against myTag
    result = runner.invoke(cli.main, [
        '-b',
        'has mytag',
        '--config',
        str(t),
        '--tag',
        'mytag',
    ])
    assert result.exit_code == 3

    # Same command as the one identified above except we set the --dry-run
    # flag. This causes our list of matched results to be printed only.
    # However, since we don't match anything, we still fail with a return code
    # of 3.
    result = runner.invoke(
        cli.main,
        ['-b', 'has mytag', '--config',
         str(t), '--tag', 'mytag', '--dry-run'])
    assert result.exit_code == 3

    # Here is a case where we get what was expected; we also attach a file
    result = runner.invoke(cli.main, [
        '-b',
        'has myTag',
        '--config',
        str(t),
        '--attach',
        join(dirname(__file__), 'var', 'apprise-test.gif'),
        '--tag',
        'myTag',
    ])
    assert result.exit_code == 0

    # Testing with the --dry-run flag reveals the same positive results
    # because there was at least one match
    result = runner.invoke(cli.main, [
        '-b',
        'has myTag',
        '--config',
        str(t),
        '--tag',
        'myTag',
        '--dry-run',
    ])
    assert result.exit_code == 0

    #
    # Test environment variables
    #
    # Write a simple text based configuration file
    t2 = tmpdir.mkdir("apprise-obj-env").join("apprise")
    buf = """
    # A general one
    good://localhost

    # A failure (if we use the fail tag)
    fail=bad://localhost

    # A normal one tied to myTag
    myTag=good://nuxref.com
    """
    t2.write(buf)

    with environ(APPRISE_URLS="good://localhost"):
        # This will load okay because we defined the environment
        # variable with a valid URL
        result = runner.invoke(
            cli.main,
            [
                '-b',
                'test environment',
                # Test that we ignore our tag
                '--tag',
                'mytag',
            ])
        assert result.exit_code == 0

        # Same action but without --tag
        result = runner.invoke(cli.main, [
            '-b',
            'test environment',
        ])
        assert result.exit_code == 0

    with mock.patch('apprise.cli.DEFAULT_SEARCH_PATHS', []):
        with environ(APPRISE_URLS="      "):
            # An empty string is not valid and therefore not loaded so the
            # below fails. We override the DEFAULT_SEARCH_PATHS because we
            # don't want to detect ones loaded on the machine running the unit
            # tests
            result = runner.invoke(cli.main, [
                '-b',
                'test environment',
            ])
            assert result.exit_code == 1

    with environ(APPRISE_URLS="bad://localhost"):
        result = runner.invoke(cli.main, [
            '-b',
            'test environment',
        ])
        assert result.exit_code == 1

        # If we specify an inline URL, it will over-ride the environment
        # variable
        result = runner.invoke(cli.main, [
            '-t',
            'test title',
            '-b',
            'test body',
            'good://localhost',
        ])
        assert result.exit_code == 0

        # A Config file also over-rides the environment variable if
        # specified on the command line:
        result = runner.invoke(cli.main, [
            '-b',
            'has myTag',
            '--config',
            str(t2),
            '--tag',
            'myTag',
        ])
        assert result.exit_code == 0

    with environ(APPRISE_CONFIG=str(t2)):
        # Our configuration file will load from our environment variable
        result = runner.invoke(cli.main, [
            '-b',
            'has myTag',
            '--tag',
            'myTag',
        ])
        assert result.exit_code == 0

    with mock.patch('apprise.cli.DEFAULT_SEARCH_PATHS', []):
        with environ(APPRISE_CONFIG="      "):
            # We will fail to send the notification as no path was
            # specified.
            # We override the DEFAULT_SEARCH_PATHS because we don't
            # want to detect ones loaded on the machine running the unit tests
            result = runner.invoke(cli.main, [
                '-b',
                'my message',
            ])
            assert result.exit_code == 1

    with environ(APPRISE_CONFIG="garbage/file/path.yaml"):
        # We will fail to send the notification as the path
        # specified is not loadable
        result = runner.invoke(cli.main, [
            '-b',
            'my message',
        ])
        assert result.exit_code == 1

        # We can force an over-ride by specifying a config file on the
        # command line options:
        result = runner.invoke(cli.main, [
            '-b',
            'has myTag',
            '--config',
            str(t2),
            '--tag',
            'myTag',
        ])
        assert result.exit_code == 0

    # Just a general test; if both --config and URLs are specified,
    # then the URLs trump all
    result = runner.invoke(cli.main, [
        '-b',
        'has myTag',
        '--config',
        str(t2),
        'good://localhost',
        '--tag',
        'fail',
    ])
    # Tags are ignored; a URL was specified, so it trumps the config
    assert result.exit_code == 0

    # We just repeat the test as proof that it only executes
    # the URLs despite the fact that --config was specified
    result = runner.invoke(cli.main, [
        '-b',
        'reads the url entry only',
        '--config',
        str(t2),
        'good://localhost',
        '--tag',
        'fail',
    ])
    # Tags are ignored; a URL was specified, so it trumps the config
    assert result.exit_code == 0

    # Once again, but we call bad://
    result = runner.invoke(cli.main, [
        '-b',
        'reads the url entry only',
        '--config',
        str(t2),
        'bad://localhost',
        '--tag',
        'myTag',
    ])
    assert result.exit_code == 1

    # Test Escaping:
    result = runner.invoke(cli.main, [
        '-e',
        '-t',
        'test\ntitle',
        '-b',
        'test\nbody',
        'good://localhost',
    ])
    assert result.exit_code == 0

    # Test Escaping (without title)
    result = runner.invoke(cli.main, [
        '--interpret-escapes',
        '-b',
        'test\nbody',
        'good://localhost',
    ])
    assert result.exit_code == 0
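The environ(...) context manager used in the test above comes from the project's own test utilities, not from click. A rough sketch of what such a helper typically does, setting variables for the duration of the block and restoring the previous environment on exit (an assumption, not apprise's actual implementation):

import os
from contextlib import contextmanager


@contextmanager
def environ(**kwargs):
    # Remember the previous values so they can be restored afterwards.
    previous = {key: os.environ.get(key) for key in kwargs}
    os.environ.update(kwargs)
    try:
        yield
    finally:
        for key, value in previous.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value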
 def setUp(self):
     self.runner = CliRunner()
Example #8
def runner():
    return CliRunner()
def test_push_invalid_dest_ref(cd_to_monorepo):
    result = CliRunner().invoke(git_apple_llvm_push, ['HEAD:dest'])
    assert 'refspec "dest" is invalid' in result.output
    assert result.exit_code == 1
Example #10
def test_with_obj():
    runner = CliRunner()
    result = runner.invoke(bounds.bounds, ['--with-obj'], feature_seq)
    assert result.exit_code == 0
    assert result.output.count('geometry') == result.output.count('bbox') == 2
Example #11
def test_bounds_explode_with_obj():
    runner = CliRunner()
    result = runner.invoke(bounds.bounds, ['--explode', '--with-obj'],
                           feature_collection)
    assert result.exit_code == 0
    assert result.output.count('geometry') == result.output.count('bbox') == 2
Example #12
def test_explode_with_id():
    runner = CliRunner()
    result = runner.invoke(bounds.bounds, ['--explode', '--with-id'],
                           feature_collection)
    assert result.exit_code == 0
    assert result.output.count('id') == result.output.count('bbox') == 2
Example #13
def test_explode_pp():
    runner = CliRunner()
    result = runner.invoke(bounds.bounds, ['--explode'], feature_collection_pp)
    assert result.exit_code == 0
    assert result.output.count('[') == result.output.count(']') == 2
    assert len(re.findall(r'\d*\.\d*', result.output)) == 8
Example #14
def test_precision():
    runner = CliRunner()
    result = runner.invoke(bounds.bounds, ['--precision', 1], feature_seq)
    assert result.exit_code == 0
    assert result.output.count('[') == result.output.count(']') == 2
    assert len(re.findall(r'\d*\.\d{1}\D', result.output)) == 8
 def test_help(self):
     runner = CliRunner()
     result = runner.invoke(bitio, ["--help", "bitio"])
     assert result.exit_code == 0
def test_push_invalid_single_ref_name(cd_to_monorepo):
    result = CliRunner().invoke(git_apple_llvm_push, ['foo'])
    assert 'refspec "foo" is invalid' in result.output
    assert result.exit_code == 1
 def test_no_args(self):
     """Test that bit with no arguments returns same as with --help"""
     runner = CliRunner()
     res1 = runner.invoke(bitio)
     res2 = runner.invoke(bitio, ["--help", "bitio"])
     assert res1.output == res2.output
def test_push_unsupported_def_ref(cd_to_monorepo_clone):
    result = CliRunner().invoke(git_apple_llvm_push, ['HEAD:llvm/master'])
    assert 'destination Git refspec "llvm/master" cannot be pushed to' in result.output
    assert result.exit_code == 1
Example #19
def test_apprise_cli_details(tmpdir):
    """
    CLI: Apprise() Disabled Plugin States

    """

    runner = CliRunner()

    #
    # Testing the printout of our details
    #   --details or -l
    #
    result = runner.invoke(cli.main, [
        '--details',
    ])
    assert result.exit_code == 0

    result = runner.invoke(cli.main, [
        '-l',
    ])
    assert result.exit_code == 0

    # Reset our matrix
    __reset_matrix()

    # This is a made up class that is just used to verify
    class TestReq01Notification(NotifyBase):
        """
        This class is used to test various requirement configurations
        """

        # Set some requirements
        requirements = {
            'packages_required': [
                'cryptography <= 3.4',
                'ultrasync',
            ],
            'packages_recommended': 'django',
        }

        def url(self, **kwargs):
            # Support URL
            return ''

        def send(self, **kwargs):
            # Pretend everything is okay (so we don't break other tests)
            return True

    SCHEMA_MAP['req01'] = TestReq01Notification

    # This is a made up class that is just used to verify
    class TestReq02Notification(NotifyBase):
        """
        This class is used to test various requirement configurations
        """

        # Just not enabled at all
        enabled = False

        # Set some requirements
        requirements = {
            # None and/or [] is implied, but just to show that the code won't
            # crash if explicitly set this way:
            'packages_required': None,
            'packages_recommended': [
                'cryptography <= 3.4',
            ]
        }

        def url(self, **kwargs):
            # Support URL
            return ''

        def send(self, **kwargs):
            # Pretend everything is okay (so we don't break other tests)
            return True

    SCHEMA_MAP['req02'] = TestReq02Notification

    # This is a made up class that is just used to verify
    class TestReq03Notification(NotifyBase):
        """
        This class is used to test various requirement configurations
        """

        # Set some requirements (but additionally include a details over-ride)
        requirements = {
            # We can over-ride the default details assigned to our plugin if
            # specified
            'details': _('some specified requirement details'),

            # We can set a string value as well (it does not have to be a list)
            'packages_recommended': 'cryptography <= 3.4'
        }

        def url(self, **kwargs):
            # Support URL
            return ''

        def send(self, **kwargs):
            # Pretend everything is okay (so we don't break other tests)
            return True

    SCHEMA_MAP['req03'] = TestReq03Notification

    # This is a made up class that is just used to verify
    class TestReq04Notification(NotifyBase):
        """
        This class is used to test a case where our requirements is fixed
        to a None
        """

        # This is the same as saying there are no requirements
        requirements = None

        def url(self, **kwargs):
            # Support URL
            return ''

        def send(self, **kwargs):
            # Pretend everything is okay (so we don't break other tests)
            return True

    SCHEMA_MAP['req04'] = TestReq04Notification

    # This is a made up class that is just used to verify
    class TestReq05Notification(NotifyBase):
        """
        This class is used to test a case where only packages_recommended
        is identified
        """

        requirements = {'packages_recommended': 'cryptography <= 3.4'}

        def url(self, **kwargs):
            # Support URL
            return ''

        def send(self, **kwargs):
            # Pretend everything is okay (so we don't break other tests)
            return True

    SCHEMA_MAP['req05'] = TestReq05Notification

    class TestDisabled01Notification(NotifyBase):
        """
        This class is used to test a pre-disabled state
        """

        # Just flat out disable our service
        enabled = False

        # we'll use this as a key to make our service easier to find
        # in the next part of the testing
        service_name = 'na01'

        def url(self, **kwargs):
            # Support URL
            return ''

        def notify(self, **kwargs):
            # Pretend everything is okay (so we don't break other tests)
            return True

    SCHEMA_MAP['na01'] = TestDisabled01Notification

    class TestDisabled02Notification(NotifyBase):
        """
        This class is used to test a post-disabled state
        """

        # we'll use this as a key to make our service easier to find
        # in the next part of the testing
        service_name = 'na02'

        def __init__(self, *args, **kwargs):
            super(TestDisabled02Notification, self).__init__(**kwargs)

            # enable state changes **AFTER** we initialize
            self.enabled = False

        def url(self, **kwargs):
            # Support URL
            return ''

        def notify(self, **kwargs):
            # Pretend everything is okay (so we don't break other tests)
            return True

    SCHEMA_MAP['na02'] = TestDisabled02Notification

    # We'll add a good notification to our list
    class TestEnabled01Notification(NotifyBase):
        """
        This class is just a simple enabled one
        """

        # we'll use this as a key to make our service easier to find
        # in the next part of the testing
        service_name = 'good'

        def url(self, **kwargs):
            # Support URL
            return ''

        def send(self, **kwargs):
            # Pretend everything is okay (so we don't break other tests)
            return True

    SCHEMA_MAP['good'] = TestEnabled01Notification

    # Verify that we can pass through all of our different details
    result = runner.invoke(cli.main, [
        '--details',
    ])
    assert result.exit_code == 0

    result = runner.invoke(cli.main, [
        '-l',
    ])
    assert result.exit_code == 0

    # Reset our matrix
    __reset_matrix()
    __load_matrix()
def test_push_up_to_date(cd_to_monorepo_clone):
    result = CliRunner().invoke(git_apple_llvm_push, ['HEAD:internal/master'])
    assert 'No commits to commit: everything up-to-date' in result.output
    assert result.exit_code == 0
Example #21
    if group_only:
        click.echo('--group-only passed.')


@cli.command()
@click.option('--foo', default='foo')
def foo(foo):
    click.echo(foo)


@cli.command()
def bar():
    click.echo('bar')


r = CliRunner()


def test_default_command_with_arguments():
    assert r.invoke(cli, ['--foo', 'foooo']).output == 'foooo\n'
    assert 'no such option' in r.invoke(cli, ['-x']).output


def test_group_arguments():
    assert r.invoke(cli, ['--group-only']).output == '--group-only passed.\n'


def test_explicit_command():
    assert r.invoke(cli, ['foo']).output == 'foo\n'
    assert r.invoke(cli, ['bar']).output == 'bar\n'
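The fragment above starts mid-function, so the group it exercises is not shown. The tests imply a group whose unrecognized arguments fall through to a default foo subcommand, which is the behaviour provided by click-default-group's DefaultGroup. A sketch of the missing top of the example; the exact decorator options are an assumption:

import click
from click_default_group import DefaultGroup


@click.group(cls=DefaultGroup, default='foo', default_if_no_args=True)
@click.option('--group-only', is_flag=True)
def cli(group_only):
    # The body continues exactly as shown at the start of the fragment.
    if group_only:
        click.echo('--group-only passed.')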
def test_push_prohibited_split_dir(cd_to_monorepo_clone):
    commit_file('libcxxabi/testplan', 'it works!')
    result = CliRunner().invoke(git_apple_llvm_push, ['HEAD:internal/master',
                                                      '--merge-strategy=ff-only'])
    assert 'push configuration "internal-master" prohibits pushing to "libcxxabi"' in result.output
    assert result.exit_code == 1
Example #23
    def test_creates_debug_deployment(self, debug, deployment):
        CliRunner().invoke(debug, [])

        deployment.assert_called_once()
def test_no_errors_when_clear(tmpdir_setup):
    """Should raise no errors when no cached files are present"""
    CliRunner().invoke(clear)
Example #25
 def test_deploy_with_app_id(self, deploy, deploy_in_mode):
     CliRunner().invoke(deploy, ["--verbose", "--app", "some app id"])
     deploy_in_mode.assert_called_once_with("live",
                                            app="some app id",
                                            verbose=True,
                                            log=mock.ANY)
Example #26
def test_docs_build_happy_paths_build_site_on_multiple_site_context(
    mock_webbrowser,
    mock_emit,
    invocation,
    cli_input,
    expected_stdout,
    expected_browser_call_count,
    expected_built_site_names,
    caplog,
    monkeypatch,
    context_with_two_sites,
):
    context = context_with_two_sites
    assert context.get_site_names() == ["local_site", "team_site"]

    root_dir = context.root_directory
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        invocation,
        input=cli_input,
        catch_exceptions=False,
    )
    stdout = result.stdout

    assert result.exit_code == 0
    assert "The following Data Docs sites will be built" in stdout
    assert "Building Data Docs..." in stdout
    assert "Done building Data Docs" in stdout
    assert expected_stdout in stdout
    assert mock_webbrowser.call_count == expected_browser_call_count

    expected_usage_stats_messages = [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event_payload": {},
                "event": "data_context.build_data_docs",
                "success": True,
            }
        ),
    ]
    for _ in range(expected_browser_call_count):
        expected_usage_stats_messages.append(
            mock.call(
                {
                    "event_payload": {},
                    "event": "data_context.open_data_docs",
                    "success": True,
                }
            ),
        )
    expected_usage_stats_messages.append(
        mock.call(
            {
                "event": "cli.docs.build",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    )
    assert mock_emit.call_args_list == expected_usage_stats_messages

    context = DataContext(root_dir)
    for expected_site_name in expected_built_site_names:
        assert expected_site_name in stdout
        site_dir = os.path.join(
            root_dir, context.GE_UNCOMMITTED_DIR, "data_docs", expected_site_name
        )
        assert os.path.isdir(site_dir)
        # Note the fixture has no expectations or validations - only check the index
        assert os.path.isfile(os.path.join(site_dir, "index.html"))

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )
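A note on CliRunner(mix_stderr=False) as used above: it keeps stderr out of result.stdout, so the assertions only see what the command actually printed to standard output. A hypothetical command illustrating the separation:

import click
from click.testing import CliRunner


@click.command()
def noisy():
    click.echo('result line')            # written to stdout
    click.echo('progress...', err=True)  # written to stderr


def test_streams_are_separated():
    result = CliRunner(mix_stderr=False).invoke(noisy)
    assert result.stdout == 'result line\n'
    assert result.stderr == 'progress...\n'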

Example #27
 def test_summary(self, summary, patched_summary_route):
     result = CliRunner().invoke(summary, ["--app", "some app id"])
     assert "Yield: 50.00%" in result.output
Example #28
def test_docs_build_happy_paths_build_site_on_single_site_context(
    mock_webbrowser,
    mock_emit,
    invocation,
    cli_input,
    expected_stdout,
    expected_browser_call_count,
    caplog,
    monkeypatch,
    titanic_data_context_stats_enabled_config_version_3,
):
    context = titanic_data_context_stats_enabled_config_version_3
    root_dir = context.root_directory
    runner = CliRunner(mix_stderr=False)
    monkeypatch.chdir(os.path.dirname(context.root_directory))
    result = runner.invoke(
        cli,
        invocation,
        input=cli_input,
        catch_exceptions=False,
    )
    stdout = result.stdout

    assert result.exit_code == 0
    assert "The following Data Docs sites will be built" in stdout
    assert "local_site" in stdout
    assert "Building Data Docs..." in stdout
    assert "Done building Data Docs" in stdout
    assert expected_stdout in stdout
    assert mock_webbrowser.call_count == expected_browser_call_count

    expected_usage_stats_messages = [
        mock.call(
            {"event_payload": {}, "event": "data_context.__init__", "success": True}
        ),
        mock.call(
            {
                "event_payload": {},
                "event": "data_context.build_data_docs",
                "success": True,
            }
        ),
    ]
    if expected_browser_call_count == 1:
        expected_usage_stats_messages.append(
            mock.call(
                {
                    "event_payload": {},
                    "event": "data_context.open_data_docs",
                    "success": True,
                }
            ),
        )
    expected_usage_stats_messages.append(
        mock.call(
            {
                "event": "cli.docs.build",
                "event_payload": {"api_version": "v3"},
                "success": True,
            }
        ),
    )
    assert mock_emit.call_args_list == expected_usage_stats_messages

    context = DataContext(root_dir)
    obs_urls = context.get_docs_sites_urls()

    assert len(obs_urls) == 1
    assert (
        "great_expectations/uncommitted/data_docs/local_site/index.html"
        in obs_urls[0]["site_url"]
    )
    site_dir = os.path.join(
        root_dir, context.GE_UNCOMMITTED_DIR, "data_docs", "local_site"
    )
    assert os.path.isdir(site_dir)
    # Note the fixture has no expectations or validations - only check the index
    assert os.path.isfile(os.path.join(site_dir, "index.html"))

    assert_no_logging_messages_or_tracebacks(
        my_caplog=caplog,
        click_result=result,
    )

Example #29
 def test_raises_with_no_qualification(self, revoke, mturk):
     result = CliRunner().invoke(revoke, [u"some worker id"],
                                 input=self.DO_IT)
     assert result.exit_code != 0
     assert "at least one worker ID" in result.output
Example #30
def test_fail():
    runner = CliRunner()
    result = runner.invoke(bounds.bounds, [], '5')
    assert result.exit_code == 1
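One last note on the failing case above: by default CliRunner catches exceptions raised inside the command, so the test process does not crash; the run simply reports a non-zero exit code and keeps the exception on the result. A small sketch with a hypothetical boom command:

import click
from click.testing import CliRunner


@click.command()
def boom():
    raise ValueError('bad input')


def test_failure_is_captured():
    result = CliRunner().invoke(boom)
    # The exception does not propagate; it is stored on the result instead.
    assert result.exit_code == 1
    assert isinstance(result.exception, ValueError)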