Exemple #1
0
def blackbox_replay(blackbox, name, test):
    """Replay a blackbox steno test scenario.

    `test` has two blank-line separated sections: dictionary definitions
    (the body of a dict literal), then one instruction per line. An
    instruction is either `:action` (change a setting on the fly), or
    `steno output`, where `output` is a string literal (expected output)
    or `raise ExceptionName` (expected exception).
    """
    # Hide from traceback on assertions (reduce output size for failed tests).
    __tracebackhide__ = operator.methodcaller('errisinstance', AssertionError)
    definitions, instructions = test.strip().rsplit('\n\n', 1)
    for steno, translation in ast.literal_eval(
        '{' + definitions + '}'
    ).items():
        blackbox.dictionary.set(normalize_steno(steno), translation)
    # Track line number for a more user-friendly assertion message.
    lines = test.split('\n')
    lnum = len(lines)-3 - test.rstrip().rsplit('\n\n', 1)[1].count('\n')
    for step in re.split('(?<=[^\\\\])\n', instructions):
        # Mark current instruction's line.
        lnum += 1
        step = step.strip()
        # Support for changing some settings on the fly.
        if step.startswith(':'):
            action = step[1:]
            if action == 'start_attached':
                blackbox.formatter.start_attached = True
            elif action == 'spaces_after':
                blackbox.formatter.set_space_placement('After Output')
            elif action == 'spaces_before':
                blackbox.formatter.set_space_placement('Before Output')
            else:
                # Fix: `msg` was undefined here (NameError on bad input);
                # report the offending step instead.
                raise ValueError('invalid action:\n%s' % step)
            continue
        steno, output = step.split(None, 1)
        steno = list(map(steno_to_stroke, normalize_steno(steno.strip())))
        output = output.strip()
        assert_msg = (
            name + '\n' +
            '\n'.join(('> ' if n == lnum else '  ') + l
                      for n, l in enumerate(lines)) + '\n'
        )
        if output.startswith(("'", '"')):
            # Replay strokes (plain loop: translate() is called for effect).
            for stroke in steno:
                blackbox.translator.translate(stroke)
            # Check output.
            expected_output = ast.literal_eval(output)
            assert_msg += (
                '   ' + repr(blackbox.output.text) + '\n'
                '!= ' + repr(expected_output)
            )
            assert blackbox.output.text == expected_output, assert_msg
        elif output.startswith('raise '):
            expected_exception = output[6:].strip()
            try:
                for stroke in steno:
                    blackbox.translator.translate(stroke)
            except Exception as e:
                exception_class = e.__class__.__name__
            else:
                exception_class = 'None'
            assert_msg += (
                '   ' + exception_class + '\n'
                '!= ' + expected_exception
            )
            assert exception_class == expected_exception, assert_msg
        else:
            raise ValueError('invalid output:\n%s' % output)
Exemple #2
0
def test_rtf_load(test_case):
    """Check RTF/CRE dictionary loading for one test case.

    `test_case` is either a (rtf_translation, expected_translation) tuple,
    or a string with RTF entries and expected dictionary entries separated
    by a blank line.
    """
    if isinstance(test_case, tuple):
        # Translation conversion test: map stroke S to the expected result.
        rtf_entries = r'{\*\cxs S}' + test_case[0]
        dict_entries = {normalize_steno('S'): test_case[1]}
    else:
        rtf_entries, dict_entries = test_case.rsplit('\n\n', 1)
        dict_entries = {
            normalize_steno(steno): translation
            for steno, translation
            in ast.literal_eval('{' + dict_entries + '}').items()
        }
    rtf_styles = dict(enumerate((
        'Normal',
        'Question',
        'Answer',
        'Colloquy',
        'Continuation Q',
        'Continuation A',
        'Continuation Col',
        'Paren',
        'Centered',
    )))
    # Assemble a minimal RTF document around the entries.
    header = [r'{\rtf1\ansi\cxdict{\*\cxrev100}{\*\cxsystem Fake Software}']
    header.extend(r'{\s%d %s;}' % (num, style)
                  for num, style in rtf_styles.items())
    header.append('}')
    rtf = '\r\n'.join(header) + rtf_entries + '\r\n}'
    with make_dict(rtf.encode('cp1252')) as filename:
        d = RtfDictionary.load(filename)
        assert dict(d.items()) == dict_entries
 def wrapper(self):
     """Run the wrapped test, then replay the scenario from its docstring.

     The docstring holds three blank-line separated sections: dictionary
     definitions, strokes to replay, and the expected output.
     """
     f(self)
     definitions, strokes, output = f.__doc__.strip().split('\n\n')
     entries = ast.literal_eval('{' + definitions + '}')
     for steno, translation in entries.items():
         self.dictionary.set(normalize_steno(steno), translation)
     for s in normalize_steno(strokes.strip()):
         self.translator.translate(steno_to_stroke(s))
     self.assertEqual(self.output.text, ast.literal_eval(output.strip()), msg=f.__doc__)
Exemple #4
0
 def test_normalize_steno(self):
     """Spot-check normalize_steno() against expected stroke strings."""
     # TODO: More cases
     cases = (
         ('S', 'S'),
         ('S-', 'S'),
         ('-S', '-S'),
         ('ES', 'ES'),
         ('-ES', 'ES'),
         ('TW-EPBL', 'TWEPBL'),
         ('TWEPBL', 'TWEPBL'),
         ('19', '1-9'),
         ('14', '14'),
         ('146', '14-6'),
         ('67', '-67'),
         ('6', '-6'),
         ('9', '-9'),
         ('5', '5'),
         ('0', '0'),
         ('456', '456'),
         ('46', '4-6'),
         ('4*6', '4*6'),
         ('456', '456'),
         ('S46', 'S4-6'),
     )
     for arg, expected in cases:
         result = '/'.join(normalize_steno(arg))
         self.assertEqual(result, expected)
Exemple #5
0
 def test_bug557_resumed(self):
     # Using the asterisk key to delete letters in fingerspelled words
     # occasionally causes problems when the space placement is set to
     # "After Output".
     definitions = {
         'EU': 'I',
         'HRAOEUBG': 'like',
         'T*': '{>}{&t}',
         'A*': '{>}{&a}',
         'KR*': '{>}{&c}',
         'O*': '{>}{&o}',
         'S*': '{>}{&s}',
     }
     for steno, translation in definitions.items():
         self.dictionary.set(normalize_steno(steno), translation)
     self.formatter.set_space_placement('After Output')
     replayed = (
         'EU',
         'HRAOEUBG',
         'T*', 'A*', 'KR*', 'O*', 'S*',
         '*', '*', '*', '*', '*',
         'HRAOEUBG',
     )
     for steno in replayed:
         self.translator.translate(steno_to_stroke(steno))
     self.assertEqual(self.output.text, u'I like like ')
Exemple #6
0
 def on_apply_filter(self):
     """Apply the current strokes/translation filters to the model."""
     self.table.selectionModel().clear()
     raw_strokes = self.strokes_filter.text().strip()
     raw_translation = self.translation_filter.text().strip()
     self._model.filter(
         strokes_filter='/'.join(normalize_steno(raw_strokes)),
         translation_filter=unescape_translation(raw_translation),
     )
Exemple #7
0
def test_normalize_steno(steno, strokes):
    """Check that normalizing `steno` yields the expected `strokes`."""
    result = '/'.join(normalize_steno(steno))
    assert result == strokes, \
        'normalize_steno(%r)=%r != %r' % (steno, result, strokes)
 def _strokes(self):
     """Return the normalized steno strokes from the strokes text field."""
     text = self.strokes.text().strip()
     # Spaces and slashes both separate strokes; canonicalize to slashes.
     joined = '/'.join(text.replace('/', ' ').split())
     if text.startswith('/'):
         # Preserve an explicit leading stroke separator.
         joined = '/' + joined
     return normalize_steno(joined)
Exemple #9
0
 def parse():
     """Yield (steno, translation) entries parsed from the RTF source."""
     converter = TranslationConverter(load_stylesheet(s))
     for match in DICT_ENTRY_PATTERN.finditer(s):
         converted = converter(match.group('translation'))
         # Entries the converter cannot handle are skipped.
         if converted is None:
             continue
         yield normalize_steno(match.group('steno')), converted
Exemple #10
0
 def parse():
     """Generate dictionary entries from the RTF/CRE source string."""
     styles = load_stylesheet(s)
     convert = TranslationConverter(styles)
     entries = (
         (normalize_steno(m.group('steno')), convert(m.group('translation')))
         for m in DICT_ENTRY_PATTERN.finditer(s)
     )
     for steno, translation in entries:
         # Drop entries the converter could not translate.
         if translation is not None:
             yield steno, translation
Exemple #11
0
 def load():
     """Yield (steno, translation) pairs from every sheet of the workbook."""
     for sheet, entries in book.items():
         self._sheets.append(sheet)
         for row in entries:
             # Skip blank rows and rows without a steno column.
             if not (row and row[0]):
                 continue
             steno = normalize_steno(row[0])
             translation = row[1] if len(row) > 1 else ''
             yield steno, translation
             # Remember per-entry extras: sheet name and trailing columns.
             self._extras[steno] = (sheet, row[2:])
Exemple #12
0
def replay(blackbox, name, test):
    """Replay a blackbox steno test scenario.

    `test` has two blank-line separated sections: dictionary definitions
    (the body of a dict literal), then one instruction per line. An
    instruction is either `:action` (change a setting on the fly) or
    `steno output` where `output` is a string literal of the expected
    translator output.
    """
    # Hide from traceback on assertions (reduce output size for failed tests).
    __tracebackhide__ = operator.methodcaller('errisinstance', AssertionError)
    definitions, instructions = test.strip().rsplit('\n\n', 1)
    for steno, translation in ast.literal_eval(
        '{' + definitions + '}'
    ).items():
        blackbox.dictionary.set(normalize_steno(steno), translation)
    # Track line number for a more user-friendly assertion message.
    lines = test.split('\n')
    lnum = len(lines)-3 - test.rstrip().rsplit('\n\n', 1)[1].count('\n')
    for step in re.split('(?<=[^\\\\])\n', instructions):
        # Mark current instruction's line.
        lnum += 1
        step = step.strip()
        # Support for changing some settings on the fly.
        if step.startswith(':'):
            action = step[1:]
            if action == 'start_attached':
                blackbox.formatter.start_attached = True
            elif action == 'spaces_after':
                blackbox.formatter.set_space_placement('After Output')
            elif action == 'spaces_before':
                blackbox.formatter.set_space_placement('Before Output')
            else:
                # Fix: `msg` is not defined at this point (it is only built
                # further below); report the offending step instead.
                raise ValueError('invalid action:\n%s' % step)
            continue
        # Replay strokes.
        strokes, output = step.split(None, 1)
        for s in normalize_steno(strokes.strip()):
            blackbox.translator.translate(steno_to_stroke(s))
        # Check output.
        expected_output = ast.literal_eval(output.strip())
        msg = (
            name + '\n' +
            '\n'.join(('> ' if n == lnum else '  ') + l
                      for n, l in enumerate(lines)) + '\n' +
            '   ' + repr(blackbox.output.text) + '\n'
            '!= ' + repr(expected_output)
        )
        assert blackbox.output.text == expected_output, msg
Exemple #13
0
def load_dictionary(s):
    """Load an RTF/CRE dictionary."""
    converter = TranslationConverter(load_stylesheet(s))
    entries = {}
    for match in DICT_ENTRY_PATTERN.finditer(s):
        converted = converter(match.group('translation'))
        # Skip entries the converter could not translate.
        if converted is not None:
            entries[normalize_steno(match.group('steno'))] = converted
    return StenoDictionary(entries)
 def end(self, tag):
     """Handle an element end tag from the XML dictionary parser."""
     if tag == 's':
         # Steno element: spaces separate strokes.
         self._steno = normalize_steno(self._text.replace(' ', '/'))
         self._text = None
     elif tag == 't':
         # Translation element.
         self._translation = self._text
         self._text = None
     elif tag == 'e':
         # Entry element: commit the accumulated steno/translation pair.
         self._result[self._steno] = self._translation
         self._steno = None
         self._translation = None
Exemple #15
0
def load_dictionary(s):
    """Load an RTF/CRE dictionary."""
    styles = load_stylesheet(s)
    convert = TranslationConverter(styles)
    pairs = (
        (normalize_steno(m.group('steno')), convert(m.group('translation')))
        for m in DICT_ENTRY_PATTERN.finditer(s)
    )
    # Entries the converter cannot handle are dropped.
    return StenoDictionary({steno: translation
                            for steno, translation in pairs
                            if translation is not None})
Exemple #16
0
 def ApplyFilter(self, stroke_filter, translation_filter):
     """Recompute the filtered key list from added and surviving entries."""
     stroke_filter = '/'.join(normalize_steno(stroke_filter))
     def matches(di):
         return self._itemMatchesFilter(di, stroke_filter, translation_filter)
     # New items first, then existing (non-deleted) items.
     self.filtered_keys = [di for di in self.added_items if matches(di)]
     self.sorted_keys = []
     self.filtered_keys += [
         di for di in self.all_keys
         if di not in self.deleted_items and matches(di)
     ]
     self._applySort()
Exemple #17
0
def load_dictionary(filename):
    """Load an RTF/CRE dictionary from a cp1252-encoded resource."""
    with resource_stream(filename) as fp:
        contents = fp.read().decode('cp1252')
    converter = TranslationConverter(load_stylesheet(contents))
    entries = {}
    for match in DICT_ENTRY_PATTERN.finditer(contents):
        translation = converter(match.group('translation'))
        # Skip entries the converter could not translate.
        if translation is not None:
            entries[normalize_steno(match.group('steno'))] = translation
    return StenoDictionary(entries)
def load_dictionary(filename):
    """Load an RTF/CRE dictionary from a cp1252-encoded file."""
    with open(filename, 'rb') as fp:
        text = fp.read().decode('cp1252')
    styles = load_stylesheet(text)
    convert = TranslationConverter(styles)
    entries = {}
    for match in DICT_ENTRY_PATTERN.finditer(text):
        converted = convert(match.group('translation'))
        # Entries the converter cannot handle are dropped.
        if converted is None:
            continue
        entries[normalize_steno(match.group('steno'))] = converted
    return StenoDictionary(entries)
Exemple #19
0
 def _load(self, filename):
     """Load a JSON dictionary, trying UTF-8 then Latin-1 decoding."""
     with open(filename, 'rb') as fp:
         raw = fp.read()
     for encoding in ('utf-8', 'latin-1'):
         try:
             decoded = raw.decode(encoding)
             break
         except UnicodeDecodeError:
             pass
     else:
         # Neither candidate encoding worked.
         raise ValueError('\'%s\' encoding could not be determined' % (filename,))
     entries = dict(json.loads(decoded))
     self.update((normalize_steno(steno), translation)
                 for steno, translation in entries.items())
 def ApplyFilter(self, stroke_filter, translation_filter):
     """Rebuild the filtered key list from added and surviving entries."""
     stroke_filter = '/'.join(normalize_steno(stroke_filter))
     self.filtered_keys = []
     self.sorted_keys = []
     # Consider new items first, then existing non-deleted items.
     candidates = list(self.added_items)
     candidates.extend(di for di in self.all_keys
                       if di not in self.deleted_items)
     for di in candidates:
         if self._itemMatchesFilter(di, stroke_filter, translation_filter):
             self.filtered_keys.append(di)
     self._applySort()
    def SaveChanges(self):
        """Commit pending adds/edits/deletes to their dictionaries and save."""
        self.pending_changes = False

        # Paths of the dictionaries that were touched and need saving.
        needs_saving = set()

        def _commit(item):
            # Write (or overwrite) the item's entry in its dictionary.
            item.dictionary[normalize_steno(item.stroke)] = \
                unescape_translation(item.translation)
            needs_saving.add(item.dictionary.get_path())

        # Creates.
        for item in self.added_items:
            _commit(item)

        # Updates.
        for item_id in self.modified_items:
            _commit(self.all_keys[item_id])

        # Deletes.
        for item in self.deleted_items:
            del item.dictionary[normalize_steno(item.stroke)]
            needs_saving.add(item.dictionary.get_path())

        self.engine.get_dictionary().save(needs_saving)
Exemple #22
0
def load_dictionary(filename):
    """Load a JSON steno dictionary, trying UTF-8 then Latin-1.

    Raises ValueError if neither encoding decodes the file.
    """
    for encoding in ('utf-8', 'latin-1'):
        try:
            with io.open(filename, 'r', encoding=encoding) as fp:
                entries = json.load(fp)
        except UnicodeDecodeError:
            continue
        break
    else:
        raise ValueError('\'%s\' encoding could not be determined' % (filename,))
    return StenoDictionary((normalize_steno(k), v)
                           for k, v in iteritems(dict(entries)))
Exemple #23
0
def load_dictionary(filename):
    """Load a JSON steno dictionary resource, trying UTF-8 then Latin-1.

    Raises ValueError if neither encoding decodes the resource.
    """
    for encoding in ('utf-8', 'latin-1'):
        try:
            with resource_stream(filename, encoding=encoding) as fp:
                contents = json.load(fp)
        except UnicodeDecodeError:
            continue
        break
    else:
        raise ValueError('\'%s\' encoding could not be determined' %
                         (filename, ))
    normalized = ((normalize_steno(k), v) for k, v in iteritems(dict(contents)))
    return StenoDictionary(normalized)
Exemple #24
0
 def _load(self, filename):
     """Load a JSON dictionary file, trying UTF-8 then Latin-1 decoding."""
     with open(filename, 'rb') as fp:
         raw = fp.read()
     for encoding in ('utf-8', 'latin-1'):
         try:
             text = raw.decode(encoding)
         except UnicodeDecodeError:
             continue
         break
     else:
         # Neither candidate encoding worked.
         raise ValueError('\'%s\' encoding could not be determined' %
                          (filename, ))
     entries = dict(json.loads(text))
     self.update((normalize_steno(k), v) for k, v in iteritems(entries))
Exemple #25
0
 def test_bug606(self):
     """Suffix folding should work with space placement 'After Output'."""
     definitions = (
         ('KWEGS', 'question'),
         ('-S', '{^s}'),
         ('TP-PL', '{.}'),
     )
     for steno, translation in definitions:
         self.dictionary.set(normalize_steno(steno), translation)
     self.formatter.set_space_placement('After Output')
     for steno in ('KWEGS', '-S', 'TP-PL'):
         self.translator.translate(steno_to_stroke(steno))
     self.assertEqual(self.output.text, u'questions. ')
Exemple #26
0
 def test_bug606(self):
     """Suffix folding should work with space placement 'After Output'."""
     definitions = {
         'KWEGS': 'question',
         '-S': '{^s}',
         'TP-PL': '{.}',
     }
     for steno, translation in definitions.items():
         self.dictionary.set(normalize_steno(steno), translation)
     self.formatter.set_space_placement('After Output')
     replayed = ('KWEGS', '-S', 'TP-PL')
     for steno in replayed:
         self.translator.translate(steno_to_stroke(steno))
     self.assertEqual(self.output.text, u'questions. ')
Exemple #27
0
    def load_hjson_file(filename: str) -> Iterable[Tuple[Tuple[str], str]]:
        '''
        Loads an Hjson dictionary file and provides an iterable to its
        stroke to translation mappings.

        :param filename: The file path of the Hjson dictionary to load.
        :return: An iterable that provides tuples of stroke tuple, translation.
        '''
        with open(filename, 'r', encoding='utf-8') as in_file:
            data = hjson.load(in_file, encoding='utf-8')
        # The file maps each translation to a list of steno strokes.
        for translation, strokes in data.items():
            yield from ((normalize_steno(stroke), translation)
                        for stroke in strokes)
Exemple #28
0
    def test_normalize_steno(self):
        """Check normalize_steno() against a table of input/expected pairs."""
        cases = (
            # TODO: More cases
            ('S', 'S'),
            ('S-', 'S'),
            ('-S', '-S'),
            ('ES', 'ES'),
            ('-ES', 'ES'),
            ('TW-EPBL', 'TWEPBL'),
            ('TWEPBL', 'TWEPBL'),
            ('19', '1-9'),
            ('14', '14'),
            ('146', '14-6'),
            ('67', '-67'),
            ('6', '-6'),
            ('9', '-9'),
            ('5', '5'),
            ('0', '0'),
            ('456', '456'),
            ('46', '4-6'),
            ('4*6', '4*6'),
            ('456', '456'),
            ('S46', 'S4-6'),
            # Number key.
            ('#S', '#S'),
            ('#A', '#A'),
            ('#0', '0'),
            ('#6', '-6'),
            # Implicit hyphens.
            ('SA-', 'SA'),
            ('SA-R', 'SAR'),
            ('-O', 'O'),
            ('S*-R', 'S*R'),
            ('S-*R', 'S*R'),
        )
        for arg, expected in cases:
            result = '/'.join(normalize_steno(arg))
            self.assertEqual(
                result, expected,
                msg='normalize_steno(%r)=%r != %r' % (arg, result, expected))
Exemple #29
0
    def load_yaml_file(filename: str) -> Iterable[Tuple[Tuple[str], str]]:
        '''
        Loads a YAML dictionary file and provides an iterable to its
        stroke to translation mappings.

        :param filename: The file path of the YAML dictionary to load.
        :return: An iterable that provides tuples of stroke tuple, translation.
        '''
        # Can't use the round-trip loader for performance reasons.
        yaml = ruamel.yaml.YAML(typ='safe')
        with open(filename, 'r', encoding='utf-8') as in_file:
            data = yaml.load(in_file)
        # The file maps each translation to a list of steno strokes.
        for translation, strokes in data.items():
            yield from ((normalize_steno(stroke), translation)
                        for stroke in strokes)
Exemple #30
0
 def test_capitalized_fingerspelling_spaces_after(self):
     # Using the asterisk key to delete letters in fingerspelled words
     # occasionally causes problems when the space placement is set to
     # "After Output".
     definitions = {
         'HRAOEUBG': 'like',
         'T*': '{&T}',
         'A*': '{&A}',
         'KR*': '{&C}',
         'O*': '{&O}',
         'S*': '{&S}',
     }
     for steno, translation in definitions.items():
         self.dictionary.set(normalize_steno(steno), translation)
     self.formatter.set_space_placement('After Output')
     for steno in ('HRAOEUBG', 'T*', 'A*', 'KR*', 'O*', 'S*'):
         self.translator.translate(steno_to_stroke(steno))
     self.assertEqual(self.output.text, u'like TACOS ')
Exemple #31
0
    def test_normalize_steno(self):
        """Table-driven check of normalize_steno() results."""
        cases = (
            # TODO: More cases
            ("S", "S"),
            ("S-", "S"),
            ("-S", "-S"),
            ("ES", "ES"),
            ("-ES", "ES"),
            ("TW-EPBL", "TWEPBL"),
            ("TWEPBL", "TWEPBL"),
            ("19", "1-9"),
            ("14", "14"),
            ("146", "14-6"),
            ("67", "-67"),
            ("6", "-6"),
            ("9", "-9"),
            ("5", "5"),
            ("0", "0"),
            ("456", "456"),
            ("46", "4-6"),
            ("4*6", "4*6"),
            ("456", "456"),
            ("S46", "S4-6"),
            # Number key.
            ("#S", "#S"),
            ("#A", "#A"),
            ("#0", "0"),
            ("#6", "-6"),
            # Implicit hyphens.
            ("SA-", "SA"),
            ("SA-R", "SAR"),
            ("-O", "O"),
            ("S*-R", "S*R"),
            ("S-*R", "S*R"),
        )
        for arg, expected in cases:
            result = "/".join(normalize_steno(arg))
            self.assertEqual(
                result, expected,
                msg="normalize_steno(%r)=%r != %r" % (arg, result, expected))
Exemple #32
0
 def setData(self, index, value, role=Qt.EditRole, record=True):
     """Edit one cell (steno, translation, or dictionary) of a row.

     Updates the backing dictionary entry accordingly and records the
     (old_item, new_item) pair on the undo stack when `record` is true.
     Returns True if the model changed, False if the edit was a no-op.
     """
     assert role == Qt.EditRole
     row = index.row()
     column = index.column()
     old_item = self._entries[row]
     strokes = old_item.strokes
     steno, translation, dictionary = old_item
     if column == _COL_STENO:
         strokes = normalize_steno(value.strip())
         steno = '/'.join(strokes)
         # Reject empty or unchanged steno.
         if not steno or steno == old_item.steno:
             return False
     elif column == _COL_TRANS:
         translation = unescape_translation(value.strip())
         if translation == old_item.translation:
             return False
     elif column == _COL_DICT:
         path = expand_path(value)
         # Find the target dictionary by path; `dictionary` keeps the
         # last candidate if no exact match is found.
         for dictionary in self._dictionary_list:
             if dictionary.path == path:
                 break
         if dictionary == old_item.dictionary:
             return False
     # Remove the entry from its previous dictionary; it may not exist
     # yet (e.g. a newly added, still empty row).
     try:
         del old_item.dictionary[old_item.strokes]
     except KeyError:
         pass
     if not old_item.strokes and not old_item.translation:
         # Merge operations when editing a newly added row.
         if self._operations and self._operations[-1] == [(None, old_item)]:
             self._operations.pop()
             old_item = None
     new_item = DictionaryItem(steno, translation, dictionary)
     self._entries[row] = new_item
     dictionary[strokes] = translation
     if record:
         self._operations.append((old_item, new_item))
     self.dataChanged.emit(index, index)
     return True
Exemple #33
0
 def test_normalize_steno(self):
     """Spot-check normalize_steno() on a small table of cases."""
     # TODO: More cases
     cases = (
         ('S', 'S'),
         ('S-', 'S'),
         ('-S', '-S'),
         ('ES', 'ES'),
         ('-ES', 'ES'),
         ('TW-EPBL', 'TWEPBL'),
         ('TWEPBL', 'TWEPBL'),
         ('19', '1-9'),
         ('14', '14'),
         ('146', '14-6'),
         ('67', '67'),
         ('46', '4-6'),
         ('456', '456'),
         ('S46', 'S4-6'),
     )
     for arg, expected in cases:
         result = '/'.join(normalize_steno(arg))
         self.assertEqual(result, expected)
 def test_basic(self):
     """Check loading and using the show_stroke Python dictionary plugin."""
     plugin_path = os.path.join(os.path.dirname(__file__), 'show_stroke.py')
     d = load_dictionary(plugin_path)
     self.assertEqual(d.readonly, True)
     self.assertEqual(d.longest_key, 2)
     # A lone 'STR' stroke is not a valid key.
     with self.assertRaises(KeyError):
         d[('STR', )]
     self.assertEqual(d.get(('STR', )), None)
     self.assertEqual(d[('STR*', 'STR')], 'STR')
     self.assertEqual(d.get(('STR*', 'STR')), 'STR')
     self.assertEqual(d.reverse_lookup('STR'), ())
     # Put the plugin dictionary in front of the regular ones.
     self.dictionary.set_dicts([d] + self.dictionary.dicts)
     self.dictionary.set(normalize_steno('STR'), u'center')
     for steno in ('STR', 'STR*', 'STR', 'STR'):
         self.translator.translate(steno_to_stroke(steno))
     self.assertEqual(self.output.text, u' center STR center')
Exemple #35
0
 def setData(self, index, value, role=Qt.EditRole, record=True):
     """Edit one cell (strokes, translation, or dictionary) of a row.

     Updates the backing dictionary entry accordingly and records the
     (old_item, new_item) pair on the undo stack when `record` is true.
     Returns True if the model changed, False if the edit was a no-op.
     """
     assert role == Qt.EditRole
     row = index.row()
     column = index.column()
     old_item = self._entries[row]
     strokes, translation, dictionary = old_item
     if column == _COL_STENO:
         strokes = normalize_steno(value.strip())
         # Reject empty or unchanged strokes.
         if not strokes or strokes == old_item.strokes:
             return False
     elif column == _COL_TRANS:
         translation = unescape_translation(value.strip())
         if translation == old_item.translation:
             return False
     elif column == _COL_DICT:
         path = expand_path(value)
         # Find the target dictionary by path; `dictionary` keeps the
         # last candidate if no exact match is found.
         for dictionary in self._dictionary_list:
             if dictionary.get_path() == path:
                 break
         if dictionary == old_item.dictionary:
             return False
     # Remove the entry from its previous dictionary; it may not exist
     # yet (e.g. a newly added, still empty row).
     try:
         del old_item.dictionary[old_item.strokes]
     except KeyError:
         pass
     if not old_item.strokes and not old_item.translation:
         # Merge operations when editing a newly added row.
         if self._operations and self._operations[-1] == [(None, old_item)]:
             self._operations.pop()
             old_item = None
     new_item = DictionaryItem(strokes, translation, dictionary)
     self._entries[row] = new_item
     dictionary[strokes] = translation
     if record:
         self._operations.append((old_item, new_item))
     self.dataChanged.emit(index, index)
     return True
Exemple #36
0
 def define(self, key, value):
     """Map the normalized steno `key` to `value` in the dictionary."""
     self.d[normalize_steno(key)] = value
Exemple #37
0
 def h(pairs):
     """Build a StenoDictionary from (steno, translation) pairs."""
     return StenoDictionary((normalize_steno(steno), translation)
                            for steno, translation in pairs)
 def _strokes(self):
     """Return normalized strokes from the text field, or () if empty."""
     parts = self.strokes.text().replace('/', ' ').split()
     if not parts:
         return ()
     return normalize_steno('/'.join(parts))
Exemple #39
0
 def _strokes(self):
     """Return normalized strokes from the text field, or () if empty."""
     tokens = self.strokes.text().replace('/', ' ').split()
     return normalize_steno('/'.join(tokens)) if tokens else ()
 def define(self, key, value):
     """Add an entry for the normalized steno `key` mapping to `value`."""
     normalized = normalize_steno(key)
     self.d[normalized] = value
Exemple #41
0
 def _normalized_strokes(self):
     """Return the normalized steno from the strokes text control."""
     raw = self.strokes_text.GetValue()
     # Spaces and slashes both separate strokes.
     return normalize_steno('/'.join(raw.replace('/', ' ').split()))
Exemple #42
0
def test_normalize_steno(steno, strokes):
    """Check that normalizing `steno` yields the expected `strokes`."""
    result = '/'.join(normalize_steno(steno))
    assert result == strokes, (
        'normalize_steno(%r)=%r != %r' % (steno, result, strokes))
Exemple #43
0
 def strokes(self):
     """Return the normalized strokes for this entry's steno."""
     steno = self.steno
     return normalize_steno(steno)
Exemple #44
0
 def _normalized_strokes(self):
     """Return the normalized steno from the (uppercased) text control."""
     raw = self.strokes_text.GetValue().upper()
     # Spaces and slashes both separate strokes.
     return normalize_steno('/'.join(raw.replace('/', ' ').split()))
Exemple #45
0
 def parse_entries(entries):
     """Parse `entries` (the body of a dict literal) into a normalized dict."""
     parsed = ast.literal_eval('{' + entries + '}')
     return {normalize_steno(steno): translation
             for steno, translation in parsed.items()}
 def _splitStrokes(self, strokes_string):
     """Normalize an uppercased steno string into strokes."""
     return normalize_steno(strokes_string.upper())
Exemple #47
0
 def on_apply_filter(self):
     """Apply the strokes/translation filters to the model."""
     steno_text = self.strokes_filter.text().strip()
     translation_text = self.translation_filter.text().strip()
     self._model.filter(
         strokes_filter='/'.join(normalize_steno(steno_text)),
         translation_filter=unescape_translation(translation_text))
 def _splitStrokes(self, strokes_string):
     """Split an uppercased steno string into normalized strokes."""
     normalized = normalize_steno(strokes_string.upper())
     return normalized
 def h(pairs):
     """Create a StenoDictionary, normalizing each steno key."""
     entries = [(normalize_steno(steno), translation)
                for steno, translation in pairs]
     return StenoDictionary(entries)