def run_list(self, target):
    if target.lower() in [x.lower() for x in self.target_utilities.get_target_names()]:
        pluralizer = Pluralizer()
        util_name = pluralizer.singular(target.lower().capitalize()) + "Utilities"
        util_module = __import__(util_name)
        util_class = getattr(util_module, util_name)
        util_instance = util_class()
        util_instance.list()
    else:
        print(self.get_list_doc())
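# A minimal sketch (not part of the original class) of the name derivation used in
# run_list above, assuming the pluralizer package's case-preserving singular(): a
# plural target such as "users" would map to the hypothetical class name "UserUtilities".
from pluralizer import Pluralizer

pluralizer = Pluralizer()
target = "users"  # illustrative target name, assumed plural
util_name = pluralizer.singular(target.lower().capitalize()) + "Utilities"
print(util_name)  # expected: UserUtilities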
def test_allow_new_plural_matching_rules_to_be_strings(self):
    pluralizer = Pluralizer()
    self.assertEqual(pluralizer.plural('person'), 'people')
    pluralizer.addPluralRule('person', 'peeps')
    self.assertEqual(pluralizer.plural('person'), 'peeps')
def test_add_new_singular_matching_rules(self):
    pluralizer = Pluralizer()
    self.assertEqual(pluralizer.singular('singles'), 'single')
    pluralizer.addSingularRule(re.compile('singles$'), 'singular')
    self.assertEqual(pluralizer.singular('singles'), 'singular')
def test_add_new_plural_matching_rules(self):
    pluralizer = Pluralizer()
    self.assertEqual(pluralizer.plural('regex'), 'regexes')
    pluralizer.addPluralRule(re.compile(r'(?i)gex$'), 'gexii')
    self.assertEqual(pluralizer.plural('regex'), 'regexii')
def test_return_false_for_irregular_words(self):
    pluralizer = Pluralizer()
    self.assertTrue(pluralizer.isPlural('irregulars'))
    pluralizer.addIrregularRule('irregulars', 'regular')
    self.assertFalse(pluralizer.isPlural('irregulars'))
def test_add_new_irregular_words(self):
    pluralizer = Pluralizer()
    self.assertEqual(pluralizer.pluralize('irregular'), 'irregulars')
    pluralizer.addIrregularRule('irregular', 'regular')
    self.assertEqual(pluralizer.pluralize('irregular'), 'regular')
def test_add_new_uncountable_rules(self):
    pluralizer = Pluralizer()
    self.assertEqual(pluralizer.pluralize('paper'), 'papers')
    pluralizer.addUncountableRule('paper')
    self.assertEqual(pluralizer.pluralize('paper'), 'paper')
def test_methods_plural(self):
    pluralizer = Pluralizer()
    for test in [*BASIC_TESTS, *PLURAL_TESTS]:
        self.assertEqual(pluralizer.plural(test[0]), test[1])
def test_prepend_count_plural_words(self):
    pluralizer = Pluralizer()
    self.assertEqual(pluralizer.pluralize('test', 5, True), '5 tests')
def test_automatically_convert_singular(self):
    pluralizer = Pluralizer()
    for test in [*BASIC_TESTS, *SINGULAR_TESTS]:
        self.assertEqual(pluralizer.pluralize(test[0], 1), test[0])
        self.assertEqual(pluralizer.pluralize(test[1], 1), test[0])
def test_automatically_convert_plural(self):
    pluralizer = Pluralizer()
    for test in [*BASIC_TESTS, *PLURAL_TESTS]:
        self.assertEqual(pluralizer.pluralize(test[1], 5), test[1])
        self.assertEqual(pluralizer.pluralize(test[0], 5), test[1])
def test_methods_is_singular(self):
    pluralizer = Pluralizer()
    for test in [*BASIC_TESTS, *SINGULAR_TESTS]:
        self.assertTrue(pluralizer.isSingular(test[0]))
def test_methods_singular(self):
    pluralizer = Pluralizer()
    for test in [*BASIC_TESTS, *SINGULAR_TESTS]:
        self.assertEqual(pluralizer.singular(test[1]), test[0])
def test_methods_is_plural(self):
    pluralizer = Pluralizer()
    for test in [*BASIC_TESTS, *PLURAL_TESTS]:
        self.assertTrue(pluralizer.isPlural(test[1]), f"isPlural('{test[1]}')")
def test_allow_new_singular_matching_rules_to_be_strings(self):
    pluralizer = Pluralizer()
    self.assertEqual(pluralizer.singular('mornings'), 'morning')
    pluralizer.addSingularRule('mornings', 'suck')
    self.assertEqual(pluralizer.singular('mornings'), 'suck')
from PyDictionary import PyDictionary
dictionary = PyDictionary()
from pattern.en import conjugate, lemma, lexeme
import string, nltk, random
from pluralizer import Pluralizer
pluralizer = Pluralizer()
# from nltk import word_tokenize
nltk.download('averaged_perceptron_tagger')
# nltk.download('tagsets')
from nltk import pos_tag


def GETSIMP(pos):
    if pos[0:2] == 'NN':  # is noun?
        simpPos = 'NN'
        plural = False
        cap = False
        if 'S' in pos:
            plural = True
        if 'P' in pos:
            cap = True
        return simpPos, plural, cap
    elif pos[0:2] == 'JJ':  # is adjective?
        simpPos = 'JJ'
        comp = False
        sup = False
        if 'R' in pos:
            comp = True
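# A hedged usage sketch (an assumption, not taken from the original script): GETSIMP
# appears to consume Penn Treebank tags produced by nltk.pos_tag, collapsing the noun
# tags (NN/NNS/NNP/NNPS) into 'NN' plus plural/proper flags.
from nltk import pos_tag

tokens = ['The', 'dogs', 'bark']
for word, tag in pos_tag(tokens):  # e.g. ('dogs', 'NNS'); exact tags can vary
    if tag.startswith('NN'):
        print(word, GETSIMP(tag))  # e.g. dogs ('NN', True, False)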
def test_prepend_count_singular_words(self):
    pluralizer = Pluralizer()
    self.assertEqual(pluralizer.pluralize('test', 1, True), '1 test')