Example no. 1
0
def text_monitor_plugin(text_monitor_workbench):
    """Create a text monitor plugin preconfigured with custom rules.

    """
    plugin_id = 'ecpy.measure.monitors.text_monitor'
    format_rule = {'class_id': 'ecpy.FormatRule',
                   'id': 'test_format',
                   'suffixes': repr(['a', 'b']),
                   'new_entry_formatting': '{a}/{b}',
                   'new_entry_suffix': 'c'}
    reject_rule = {'class_id': 'ecpy.RejectRule',
                   'id': 'test_reject',
                   'suffixes': repr(['a', 'b'])}
    prefs = {plugin_id: {'_user_rules': repr({'test_format': format_rule,
                                              'test_reject': reject_rule}),
                         'default_rules': repr(['test_format', 'unknown'])}}
    set_preferences(text_monitor_workbench, prefs)
    plugin = text_monitor_workbench.get_plugin(plugin_id)
    # The rules were injected without going through the preferences, so the
    # default list must be assigned by hand.
    plugin.default_rules = ['test_format', 'unknown']
    return plugin
Example no. 2
0
def test_handling_not_found_default_tools(measure_workbench):
    """Check that non-detectable default tools trigger an error dialog.

    """
    prefs = {'ecpy.measure': {'default_monitors': "['dummy']"}}
    set_preferences(measure_workbench, prefs)

    with pytest.raises(ErrorDialogException):
        measure_workbench.get_plugin('ecpy.measure')
Example no. 3
0
def test_handling_missing_default_rule(text_monitor_workbench, caplog):
    """Check that default rules lacking a backing config are dropped
    with a log record.

    """
    plugin_id = 'ecpy.measure.monitors.text_monitor'
    set_preferences(text_monitor_workbench,
                    {plugin_id: {'default_rules':
                                 repr(['test_format', 'unknown'])}})
    text_monitor_workbench.get_plugin(plugin_id)
    assert caplog.records
Example no. 4
0
def test_starting_with_default_tools(measure_workbench):
    """Check that starting with default selected tools restores them.

    """
    measure_workbench.register(MeasureTestManifest())
    prefs = {'ecpy.measure': {'default_monitors': "['dummy']"}}
    set_preferences(measure_workbench, prefs)

    pl = measure_workbench.get_plugin('ecpy.measure')

    assert pl.default_monitors
Example no. 5
0
def test_handling_not_found_default_tools(measure_workbench):
    """Check that non-detectable default tools trigger an error dialog.

    """
    set_preferences(
        measure_workbench,
        {'ecpy.measure': {'default_monitors': "['dummy']"}},
    )

    with pytest.raises(ErrorDialogException):
        measure_workbench.get_plugin('ecpy.measure')
Example no. 6
0
def test_starting_with_default_tools(measure_workbench):
    """Check that starting with default selected tools restores them.

    """
    measure_workbench.register(MeasureTestManifest())
    set_preferences(
        measure_workbench,
        {'ecpy.measure': {'default_monitors': "['dummy']"}},
    )

    pl = measure_workbench.get_plugin('ecpy.measure')

    assert pl.default_monitors
Example no. 7
0
def test_starting_with_a_default_selected_engine(measure_workbench):
    """Check that a default-selected engine is properly mounted.

    """
    measure_workbench.register(MeasureTestManifest())
    prefs = {'ecpy.measure': {'selected_engine': 'dummy'}}
    set_preferences(measure_workbench, prefs)

    pl = measure_workbench.get_plugin('ecpy.measure')
    declaration = pl.get_declarations('engine', ['dummy'])['dummy']

    assert pl.selected_engine == 'dummy'
    assert declaration.selected
Example no. 8
0
def test_overriding_preferences_if_absent(icon_workbench):
    """Check that we fall back to FontAwesome if the theme selected in
    the preferences does not exist.

    """
    prefs = {'ecpy.app.icons': {'current_theme': '_d_',
                                'fallback_theme': '_f'}}
    set_preferences(icon_workbench, prefs)
    plugin = icon_workbench.get_plugin('ecpy.app.icons')
    assert plugin.current_theme == plugin.icon_themes[0]
    assert plugin.fallback_theme == 'ecpy.FontAwesome'
Example no. 9
0
def test_starting_with_a_default_selected_engine(measure_workbench):
    """Check that a default-selected engine is properly mounted.

    """
    measure_workbench.register(MeasureTestManifest())
    set_preferences(
        measure_workbench,
        {'ecpy.measure': {'selected_engine': 'dummy'}},
    )

    pl = measure_workbench.get_plugin('ecpy.measure')

    declaration = pl.get_declarations('engine', ['dummy'])['dummy']

    assert pl.selected_engine == 'dummy'
    assert declaration.selected
Example no. 10
0
def test_starting_with_a_default_selected_engine(measure_workbench):
    """Check that a default-selected engine is properly mounted.

    """
    measure_workbench.register(MeasureTestManifest())
    engine_prefs = {'ecpy.measure': {'selected_engine': 'dummy'}}
    set_preferences(measure_workbench, engine_prefs)

    measure_plugin = measure_workbench.get_plugin('ecpy.measure')

    engine_decl = measure_plugin.get_declarations('engine',
                                                  ['dummy'])['dummy']

    assert measure_plugin.selected_engine == 'dummy'
    assert engine_decl.selected
Example no. 11
0
def text_monitor_plugin(text_monitor_workbench):
    """Create a text monitor plugin preconfigured with custom rules.

    """
    plugin_id = 'ecpy.measure.monitors.text_monitor'
    format_rule = {
        'class_id': 'ecpy.FormatRule',
        'id': 'test_format',
        'suffixes': repr(['a', 'b']),
        'new_entry_formatting': '{a}/{b}',
        'new_entry_suffix': 'c',
    }
    reject_rule = {
        'class_id': 'ecpy.RejectRule',
        'id': 'test_reject',
        'suffixes': repr(['a', 'b']),
    }
    prefs = {
        plugin_id: {
            '_user_rules': repr({'test_format': format_rule,
                                 'test_reject': reject_rule}),
            'default_rules': repr(['test_format', 'unknown']),
        }
    }
    set_preferences(text_monitor_workbench, prefs)
    return text_monitor_workbench.get_plugin(plugin_id)