Example 1
  def _add_restful_routes(self, app, klass, resource_name, resource_base):
    self.app = app

    engine = inflect.engine()
    split_name = resource_name.split("_")

    entity_id = engine.singular_noun(split_name[-1])
    if not entity_id:
      entity_id = split_name[-1]

    if "index" not in klass.except_methods:
      self._add_route(klass, resource_base, "index", ["GET"])

    if "create" not in klass.except_methods:
      self._add_route(klass, resource_base, "create", ["POST"])

    if "show" not in klass.except_methods:
      self._add_route(klass, resource_base + "/<%s_id>" % entity_id, "show", ["GET"])

    if "update" not in klass.except_methods:
      self._add_route(klass, resource_base + "/<%s_id>" % entity_id, "update", ["PUT"])

    if "delete" not in klass.except_methods:
      self._add_route(klass, resource_base + "/<%s_id>" % entity_id, "delete", ["DELETE"])

    self._add_bypass_route(klass, resource_base + "/<%s_id>" % entity_id)
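A minimal sketch (with hypothetical inputs resource_name="blog_posts" and resource_base="/blog_posts") of how entity_id is derived above: singular_noun() returns the singular form for a recognized plural and False otherwise, in which case the last name segment is kept as-is.

import inflect

engine = inflect.engine()
split_name = "blog_posts".split("_")
entity_id = engine.singular_noun(split_name[-1]) or split_name[-1]
print(entity_id)                               # -> "post"
print("/blog_posts" + "/<%s_id>" % entity_id)  # -> "/blog_posts/<post_id>"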
Example 2
def make_singular(self, field):
    p = inflect.engine()
    field_value = p.singular_noun(field)
    if field_value is False:
        return field
    else:
        return field_value
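singular_noun() returns False when the word is not recognized as a plural, which is why make_singular() falls back to the original field. A quick check, assuming only the inflect package:

import inflect

p = inflect.engine()
print(p.singular_noun("cats"))  # -> "cat"
print(p.singular_noun("cat"))   # -> False, so make_singular("cat") returns "cat"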
Example 3
def normalize_dispatch_address(addr, city='Fargo, ND'):
    '''
        Returns a postgis friendly address from a dispatch log entry.

        * removes "BLK" if present
        * ordinalizes numeric street names (34 => 34th)
    '''
    import inflect
    p = inflect.engine()
    addr = addr.lower()
    number, street_chunk = 0, ''
    if ("blk" in addr):
        t = addr.split(" blk ")
        number = t[0]
        street_chunk = t[1]
        # Quick quirk: treat the "0 block" as house number 1
        if int(number) == 0:
            number = 1
    else:
        '''
            If there is no BLK, it's likely just a street like:
            9 Ave S
        '''
        # print 'address is invalid: ', addr
        return None
    parts = street_chunk.split(' ')
    street_name = parts[0]
    try:
        street_name = int(street_name)
        street_name = p.ordinal(street_name)
    except ValueError:
        # Non-numeric street like Roberts Street
        pass
    address = ' '.join([str(number), street_name, ' '.join(parts[1:])])
    return address + " " + city
Example 4
def _pluralize(name):
  pluralize = inflect.engine()
  names = name.split("_")
  names[-1] = pluralize.plural(names[-1])

  plural_name = "".join(n.capitalize() for n in names)
  return plural_name
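Assuming inflect is imported at module level, a quick usage check of the helper above (hypothetical input):

print(_pluralize("blog_post"))  # -> "BlogPosts" (plural("post") -> "posts")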
Example 5
def create_all_vars(prebiotics_final):
    inflect_engine = inflect.engine()
    # pluralize
    prebiotic_plural = [inflect_engine.plural(prebiotic) for prebiotic in prebiotics_final]
    prebiotics_final = prebiotics_final + prebiotic_plural

    # lower
    prebiotic_lower = [prebiotic.lower() for prebiotic in prebiotics_final]
    prebiotics_final = prebiotics_final + prebiotic_lower

    # capitalize
    prebiotic_capitalized = [prebiotic.capitalize() for prebiotic in prebiotics_final]
    prebiotics_final = prebiotics_final + prebiotic_capitalized

    # upper
    prebiotic_upper = [prebiotic.upper() for prebiotic in prebiotics_final]
    prebiotics_final = prebiotics_final + prebiotic_upper

    # remove words of 2 letters or fewer
    prebiotics_final = [prebiotic for prebiotic in prebiotics_final if len(prebiotic) > 2]

    # uppercase all 3-letter words
    prebiotics_3_letter_capitalized = [prebiotic.upper() for prebiotic in prebiotics_final if len(prebiotic) == 3]
    prebiotics_final = [prebiotic for prebiotic in prebiotics_final if len(prebiotic) != 3]
    prebiotics_final = prebiotics_final + prebiotics_3_letter_capitalized

    # remove SOS
    prebiotics_final = [prebiotic for prebiotic in prebiotics_final if prebiotic.lower() != 'sos']
    return prebiotics_final
Example 6
	def __iter__(self):
		inflect_eng = inflect.engine()
		for current_pass in range(1, self.passes+1):
			current_pass_th = inflect_eng.ordinal(current_pass)
			log.info('Modifying command for %s pass.' % current_pass_th)
			method = self.last_pass if current_pass == self.passes else self.early_pass
			yield method(current_pass)
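ordinal() accepts an integer and returns the abbreviated ordinal string interpolated into the log message above; a quick check:

import inflect

eng = inflect.engine()
print(eng.ordinal(1))   # -> "1st"
print(eng.ordinal(2))   # -> "2nd"
print(eng.ordinal(23))  # -> "23rd"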
Example 7
def add_to(n_table):

    infl = inflect.engine()

    def add(noun, senses, plural=True):
        forms = [noun]
        if plural: forms.append(infl.plural_noun(noun))
        for form in forms:
            if form not in n_table: n_table[form] = set()
            n_table[form] |= senses  # Potentially non-overlapping senses.

    add('actor', {'/film/actor', '/tv/actor'})
    add('album', {'/music/album'})
    add('anyone', {'/people/person'}, plural=False)
    add('anything', set(), plural=False)
    add('anywhere', {'/location/location'}, plural=False)
    add('artist', {'/music/artist'})
    add('character', {'/fictional_universe/fictional_character'})
    add('director', {'/film/director'})
    add('place', {'/location/location'})
    add('thing', set())
    add('recording', {'/music/recording'})
    add('release', {'/music/release'})
    add('someone', {'/people/person'}, plural=False)
    add('something', set(), plural=False)
    add('somewhere', {'/location/location'}, plural=False)
    add('work', {'/visual_art/artwork'})
Example 8
def test_lines():
    p = inflect.engine()
    eq_(p.number_to_words(999, threshold=500), '999', msg=' 999 -> 999')
    eq_(p.number_to_words(1000, threshold=500), '1,000', msg='1000 -> 1,000')
    eq_(p.number_to_words(10000, threshold=500), '10,000', msg='10000 -> 10,000')
    eq_(p.number_to_words(100000, threshold=500), '100,000', msg='100000 -> 100,000')
    eq_(p.number_to_words(1000000, threshold=500), '1,000,000', msg='1000000 -> 1,000,000')

    eq_(p.number_to_words(999.3, threshold=500), '999.3', msg=' 999.3 -> 999.3')
    eq_(p.number_to_words(1000.3, threshold=500), '1,000.3', msg='1000.3 -> 1,000.3')
    eq_(p.number_to_words(10000.3, threshold=500), '10,000.3', msg='10000.3 -> 10,000.3')
    eq_(p.number_to_words(100000.3, threshold=500), '100,000.3', msg='100000.3 -> 100,000.3')
    eq_(p.number_to_words(1000000.3, threshold=500), '1,000,000.3', msg='1000000.3 -> 1,000,000.3')

    eq_(p.number_to_words(999, threshold=500, comma=0), '999', msg=' 999 -> 999')
    eq_(p.number_to_words(1000, threshold=500, comma=0), '1000', msg='1000 -> 1000')
    eq_(p.number_to_words(10000, threshold=500, comma=0), '10000', msg='10000 -> 10000')
    eq_(p.number_to_words(100000, threshold=500, comma=0), '100000', msg='100000 -> 100000')
    eq_(p.number_to_words(1000000, threshold=500, comma=0), '1000000', msg='1000000 -> 1000000')

    eq_(p.number_to_words(999.3, threshold=500, comma=0), '999.3', msg=' 999.3 -> 999.3')
    eq_(p.number_to_words(1000.3, threshold=500, comma=0), '1000.3', msg='1000.3 -> 1000.3')
    eq_(p.number_to_words(10000.3, threshold=500, comma=0), '10000.3', msg='10000.3 -> 10000.3')
    eq_(p.number_to_words(100000.3, threshold=500, comma=0), '100000.3', msg='100000.3 -> 100000.3')
    eq_(p.number_to_words(1000000.3, threshold=500, comma=0), '1000000.3', msg='1000000.3 -> 1000000.3')
Example 9
	def sumTo(n):
		total = 0
		p = inflect.engine()
		for i in range(1, n + 1):
			word = p.number_to_words(i).replace("-","").replace(" ","")
			total = total + len(word)
		return total
Example 10
def test_def():
    p = inflect.engine()

    p.defnoun("kin", "kine")
    p.defnoun('(.*)x', '$1xen')

    p.defverb('foobar',  'feebar',
              'foobar',  'feebar',
              'foobars', 'feebar')

    p.defadj('red', 'red|gules')

    eq_(p.no("kin", 0), "no kine", msg="kin -> kine (user defined)...")
    eq_(p.no("kin", 1), "1 kin")
    eq_(p.no("kin", 2), "2 kine")

    eq_(p.no("regex", 0), "no regexen", msg="regex -> regexen (user defined)")

    eq_(p.plural("foobar", 2), "feebar", msg="foobar -> feebar (user defined)...")
    eq_(p.plural("foobars", 2), "feebar")

    eq_(p.plural("red", 0), "red", msg="red -> red...")
    eq_(p.plural("red", 1), "red")
    eq_(p.plural("red", 2), "red")
    p.classical(all=True)
    eq_(p.plural("red", 0), "red", msg="red -> gules...")
    eq_(p.plural("red", 1), "red")
    eq_(p.plural("red", 2), "gules")
Example 11
    def test_num(self):
        # def num
        p = inflect.engine()
        self.assertTrue(p.persistent_count is None)

        p.num()
        self.assertTrue(p.persistent_count is None)

        ret = p.num(3)
        self.assertEqual(p.persistent_count, 3)
        self.assertEqual(ret, '3')

        p.num()
        ret = p.num("3")
        self.assertEqual(p.persistent_count, 3)
        self.assertEqual(ret, '3')

        p.num()
        ret = p.num(count=3, show=1)
        self.assertEqual(p.persistent_count, 3)
        self.assertEqual(ret, '3')

        p.num()
        ret = p.num(count=3, show=0)
        self.assertEqual(p.persistent_count, 3)
        self.assertEqual(ret, '')

        self.assertRaises(BadNumValueError, p.num, 'text')
Example 12
def letter_count(num):
    p = inflect.engine()
    total = 0
    for n in range(1, num + 1):
        line = re.sub('[ -]', '', p.number_to_words(n))
        total += len(line)
    return total
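Assuming re and inflect are imported at module level, a quick sanity check (the words for 1 through 5 contain 3 + 3 + 5 + 4 + 4 letters):

print(letter_count(5))  # -> 19 ("one" + "two" + "three" + "four" + "five")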
Example 13
    def test_count(self):
        p = inflect.engine()
        for txt, num in (
            (1, 1),
            (2, 2),
            (0, 2),
            (87, 2),
            (-7, 2),
            ('1', 1),
            ('2', 2),
            ('0', 2),
            ('no', 2),
            ('zero', 2),
            ('nil', 2),
            ('a', 1),
            ('an', 1),
            ('one', 1),
            ('each', 1),
            ('every', 1),
            ('this', 1),
            ('that', 1),
            ('dummy', 2),
        ):
            self.assertEqual(p.get_count(txt), num)

        self.assertEqual(p.get_count(), '')
        p.num(3)
        self.assertEqual(p.get_count(), 2)
Example 14
 def test__pl_special_verb(self):
     p = inflect.engine()
     self.assertEqual(p._pl_special_verb(''), False)
     self.assertEqual(p._pl_special_verb('am'), 'are')
     self.assertEqual(p._pl_special_verb('am', 0), 'are')
     self.assertEqual(p._pl_special_verb('runs', 0), 'run')
     p.classical(zero=True)
     self.assertEqual(p._pl_special_verb('am', 0), False)
     self.assertEqual(p._pl_special_verb('am', 1), 'am')
     self.assertEqual(p._pl_special_verb('am', 2), 'are')
     self.assertEqual(p._pl_special_verb('runs', 0), False)
     self.assertEqual(p._pl_special_verb('am going to'), 'are going to')
     self.assertEqual(p._pl_special_verb('did'), 'did')
     self.assertEqual(p._pl_special_verb("wasn't"), "weren't")
     self.assertEqual(p._pl_special_verb("shouldn't"), "shouldn't")
     self.assertEqual(p._pl_special_verb('bias'), False)
     self.assertEqual(p._pl_special_verb('news'), False)
     self.assertEqual(p._pl_special_verb('Jess'), False)
     self.assertEqual(p._pl_special_verb(' '), False)
     self.assertEqual(p._pl_special_verb('brushes'), 'brush')
     self.assertEqual(p._pl_special_verb('fixes'), 'fix')
     self.assertEqual(p._pl_special_verb('quizzes'), 'quiz')
     self.assertEqual(p._pl_special_verb('fizzes'), 'fizz')
     self.assertEqual(p._pl_special_verb('dresses'), 'dress')
     self.assertEqual(p._pl_special_verb('flies'), 'fly')
     self.assertEqual(p._pl_special_verb('canoes'), 'canoe')
     self.assertEqual(p._pl_special_verb('horseshoes'), 'horseshoe')
     self.assertEqual(p._pl_special_verb('does'), 'do')
     self.assertEqual(p._pl_special_verb('zzzoes'), 'zzzo')  # TODO: what's a real word to test this case?
     self.assertEqual(p._pl_special_verb('runs'), 'run')
Example 15
    def test_inflect(self):
        p = inflect.engine()
        for txt, ans in (
            ("num(1)", "1"),
            ("num(1,0)", "1"),
            ("num(1,1)", "1"),
            ("num(1)   ", "1   "),
            ("   num(1)   ", "   1   "),
            ("num(3) num(1)", "3 1"),
        ):
            self.assertEqual(p.inflect(txt), ans, msg='p.inflect("%s") != "%s"' % (txt, ans))

        for txt, ans in (
            ("plural(rock)", "rocks"),
            ("plural(rock)  plural(child)", "rocks  children"),
            ("num(2) plural(rock)  plural(child)", "2 rocks  children"),

            ("plural(rock) plural_noun(rock) plural_verb(rocks) plural_adj(big) a(ant)",
             "rocks rocks rock big an ant"),

            ("an(rock) no(cat) ordinal(3) number_to_words(1234) present_participle(runs)",
             "a rock no cats 3rd one thousand, two hundred and thirty-four running"),

            # TODO: extra space when space before number. Is this desirable?
            ("a(cat,0) a(cat,1) a(cat,2) a(cat, 2)", "0 cat a cat 2 cat  2 cat"),
        ):
            self.assertEqual(p.inflect(txt), ans, msg='p.inflect("%s") != "%s"' % (txt, ans))
Example 16
 def test__pl_check_plurals_N(self):
     p = inflect.engine()
     self.assertEqual(p._pl_check_plurals_N('index', 'indices'), False)
     self.assertEqual(p._pl_check_plurals_N('indexes', 'indices'), True)
     self.assertEqual(p._pl_check_plurals_N('indices', 'indexes'), True)
     self.assertEqual(p._pl_check_plurals_N('stigmata', 'stigmas'), True)
     self.assertEqual(p._pl_check_plurals_N('phalanxes', 'phalanges'), True)
Example 17
def article(l):
    p = inflect.engine()
    if len(l) == 1:
        return p.an(l[0])
    else:
        print "Bad Parameter Count 'article'"
    return ''
Example 18
def ia(l):
    p = inflect.engine()
    if len(l) == 1:
        return p.inflect(l[0])
    else:
        print "Bad Parameter Count 'ia'"
    return ''
Example 19
def getRespondents(versus_found_at, lines):
    i = versus_found_at + 1
    respondents_text = ''
    no_of_respondents = 0
    counsel_found_at = 0
    respondents_list = []
    p = inflect.engine()
    while i > 0:
        match = re.search('defendant[s]?|respondent[s]?', lines[i], re.I)
        if match is not None:
            no_of_respondents += 1
            prev_length = len(respondents_text)
            respondent_type = lines[i][match.start():match.end()]
            # singular_noun() returns False for non-plurals, so fall back to the original
            respondent_type = p.singular_noun(respondent_type) or respondent_type
            respondents_text = respondents_text + lines[i]
            if no_of_respondents == 1:
                respondent = {}
                respondent['party_name'] = respondents_text[:respondents_text.find("...")]
                respondent['respondent_type'] = respondent_type
                respondents_list.append(respondent)
            else:
                match_list = re.finditer(r'\d{1}\]{1}([^.]+)[.]', respondents_text, re.I)
                for match in match_list:
                    respondent = {}
                    respondent['party_name'] = match.group(1)
                    respondent['respondent_type'] = respondent_type
                    respondents_list.append(respondent)
            counsel_found_at = i + 1
            break
        respondents_text = respondents_text + lines[i]
        no_of_respondents += 1
        i += 1
    return respondents_list, counsel_found_at
Example 20
    def test_count(self):
        p = inflect.engine()
        for txt, num in (
            (1, 1),
            (2, 2),
            (0, 2),
            (87, 2),
            (-7, 2),
            ("1", 1),
            ("2", 2),
            ("0", 2),
            ("no", 2),
            ("zero", 2),
            ("nil", 2),
            ("a", 1),
            ("an", 1),
            ("one", 1),
            ("each", 1),
            ("every", 1),
            ("this", 1),
            ("that", 1),
            ("dummy", 2),
        ):
            self.assertEqual(p.get_count(txt), num)

        self.assertEqual(p.get_count(), "")
        p.num(3)
        self.assertEqual(p.get_count(), 2)
Example 21
 def test__pl_check_plurals_N(self):
     p = inflect.engine()
     self.assertEqual(p._pl_check_plurals_N("index", "indices"), False)
     self.assertEqual(p._pl_check_plurals_N("indexes", "indices"), True)
     self.assertEqual(p._pl_check_plurals_N("indices", "indexes"), True)
     self.assertEqual(p._pl_check_plurals_N("stigmata", "stigmas"), True)
     self.assertEqual(p._pl_check_plurals_N("phalanxes", "phalanges"), True)
Example 22
    def test_sinoun(self):
        p = inflect.engine()
        for sing, plur in (
            ("cat", "cats"),
            ("die", "dice"),
            ("status", "status"),
            ("hiatus", "hiatus"),
            ("goose", "geese"),
        ):
            self.assertEqual(p.singular_noun(plur), sing)
            self.assertEqual(p.inflect("singular_noun('%s')" % plur), sing)

        self.assertEqual(p.singular_noun("cats", count=2), "cats")

        self.assertEqual(p.singular_noun("zombies"), "zombie")

        self.assertEqual(p.singular_noun("shoes"), "shoe")

        self.assertEqual(p.singular_noun("Matisses"), "Matisse")
        self.assertEqual(p.singular_noun("bouillabaisses"), "bouillabaisse")

        self.assertEqual(p.singular_noun("quartzes"), "quartz")

        self.assertEqual(p.singular_noun("Nietzsches"), "Nietzsche")
        self.assertEqual(p.singular_noun("aches"), "ache")

        self.assertEqual(p.singular_noun("Clives"), "Clive")
        self.assertEqual(p.singular_noun("weaves"), "weave")
Example 23
    def test_inflect(self):
        p = inflect.engine()
        for txt, ans in (
            ("num(1)", "1"),
            ("num(1,0)", ""),
            ("num(1,1)", "1"),
            ("num(1)   ", "1   "),
            ("   num(1)   ", "   1   "),
            ("num(3) num(1)", "3 1"),
        ):
            self.assertEqual(
                p.inflect(txt), ans, msg='p.inflect("{}") != "{}"'.format(txt, ans)
            )

        for txt, ans in (
            ("plural('rock')", "rocks"),
            ("plural('rock')  plural('child')", "rocks  children"),
            ("num(2) plural('rock')  plural('child')", "2 rocks  children"),
            (
                "plural('rock') plural_noun('rock') plural_verb('rocks') "
                "plural_adj('big') a('ant')",
                "rocks rocks rock big an ant",
            ),
            (
                "an('rock') no('cat') ordinal(3) number_to_words(1234) "
                "present_participle('runs')",
                "a rock no cats 3rd one thousand, two hundred and thirty-four running",
            ),
            ("a('cat',0) a('cat',1) a('cat',2) a('cat', 2)", "0 cat a cat 2 cat 2 cat"),
        ):
            self.assertEqual(
                p.inflect(txt), ans, msg='p.inflect("{}") != "{}"'.format(txt, ans)
            )
Example 24
 def test__pl_special_verb(self):
     p = inflect.engine()
     self.assertEqual(p._pl_special_verb(""), False)
     self.assertEqual(p._pl_special_verb("am"), "are")
     self.assertEqual(p._pl_special_verb("am", 0), "are")
     self.assertEqual(p._pl_special_verb("runs", 0), "run")
     p.classical(zero=True)
     self.assertEqual(p._pl_special_verb("am", 0), False)
     self.assertEqual(p._pl_special_verb("am", 1), "am")
     self.assertEqual(p._pl_special_verb("am", 2), "are")
     self.assertEqual(p._pl_special_verb("runs", 0), False)
     self.assertEqual(p._pl_special_verb("am going to"), "are going to")
     self.assertEqual(p._pl_special_verb("did"), "did")
     self.assertEqual(p._pl_special_verb("wasn't"), "weren't")
     self.assertEqual(p._pl_special_verb("shouldn't"), "shouldn't")
     self.assertEqual(p._pl_special_verb("bias"), False)
     self.assertEqual(p._pl_special_verb("news"), False)
     self.assertEqual(p._pl_special_verb("Jess"), False)
     self.assertEqual(p._pl_special_verb(" "), False)
     self.assertEqual(p._pl_special_verb("brushes"), "brush")
     self.assertEqual(p._pl_special_verb("fixes"), "fix")
     self.assertEqual(p._pl_special_verb("quizzes"), "quiz")
     self.assertEqual(p._pl_special_verb("fizzes"), "fizz")
     self.assertEqual(p._pl_special_verb("dresses"), "dress")
     self.assertEqual(p._pl_special_verb("flies"), "fly")
     self.assertEqual(p._pl_special_verb("canoes"), "canoe")
     self.assertEqual(p._pl_special_verb("horseshoes"), "horseshoe")
     self.assertEqual(p._pl_special_verb("does"), "do")
     # TODO: what's a real word to test this case?
     self.assertEqual(p._pl_special_verb("zzzoes"), "zzzo")
     self.assertEqual(p._pl_special_verb("runs"), "run")
Example 25
def plural(l):
    p = inflect.engine()
    if len(l) == 1:
        return p.plural(l[0])
    else:
        print "Bad Parameter Count 'plural'"
    return ''
Example 26
def prep_mlf(trsfile, mlffile, word_dictionary, surround, between,
    dialog_file=False):
    
    dict_tmp = {}
    
    infl = inflect.engine()
    
    # Read in the dictionary to ensure all of the words
    # we put in the MLF file are in the dictionary. Words
    # that are not are skipped with a warning.
    f = open(word_dictionary, 'r')
    dictionary = { } # build hash table
    for line in f.readlines():
        if line != "\n" and line != "" :
            dictionary[line.split()[0]] = True
    f.close()
    
    speakers = None
    emotions = None

    if dialog_file:
        dialog = json.load(open(trsfile, 'r'))

        # make sure this is a valid transcript
        try:
            jsonschema.validate(dialog, TRANSCRIPT_SCHEMA)
        except jsonschema.ValidationError as e:
            print("Input transcript file is not in the proper format.\nSee alignment-schemas/transcript_schema.json or https://github.com/srubin/p2fa-steve")
            raise e

        lines = [dl["line"] for dl in dialog]
        speakers = [dl["speaker"] for dl in dialog]
        if "emotion" in dialog[0]:
            emotions = [dl["emotion"] for dl in dialog]
Example 27
    def get_resource_url(cls, resource, base_url):
        """
        Construct the URL for talking to this resource.

        e.g.:

        http://myapi.com/api/resource

        Note that this is NOT the method for calling individual instances, e.g.

        http://myapi.com/api/resource/1

        Args:
            resource: The resource class instance
            base_url: The base URL of this API service.
        Returns:
            resource_url: The URL for this resource
        """
        if resource.Meta.resource_name:
            url = '{}/{}'.format(base_url, resource.Meta.resource_name)
        else:
            p = inflect.engine()
            plural_name = p.plural(resource.Meta.name.lower())
            url = '{}/{}'.format(base_url, plural_name)
        return cls._parse_url_and_validate(url)
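A minimal sketch (with a hypothetical resource whose Meta.name is "User" and no Meta.resource_name) of the fallback branch above: the lowercased class name is pluralized and appended to the base URL.

import inflect

p = inflect.engine()
plural_name = p.plural("User".lower())
print('{}/{}'.format('http://myapi.com/api', plural_name))  # -> "http://myapi.com/api/users"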
Example 28
def get_synonyms(word):
    pluralizer = inflect.engine()

    # collect lowercased lemma names from every WordNet synset of the word
    syn_set = []
    for synset in wn.synsets(word):
        for lemma in synset.lemma_names():
            syn_set.append(lemma.lower())

    # remove duplicates, preserving order
    syn_setnodup = []
    for item in syn_set:
        if item not in syn_setnodup:
            syn_setnodup.append(item)

    # add plural forms
    syn_set_final = []
    for item in syn_setnodup:
        syn_set_final.append(item)
        syn_set_final.append(pluralizer.plural(item))

    return syn_set_final
Example 29
def main(area, type, pref, frac, file):
  projects = open('../data/projects.csv')
  projects.readline() # header
  good_projects = []

  allarea = 'all' in area
  alltype = 'all' in type
  allpref = 'all' in pref
  
  for line in csv.reader(projects):
    if (line[date_posted].startswith("2010") and 
        (allarea or line[primary_focus_area] in area) and 
        (allpref or line[teacher_prefix] in pref) and 
        (alltype or line[resource_type] in type)):
      good_projects.append(line[_projectid])
  good_projects = frozenset(good_projects)

  p = inflect.engine() # for de-pluralizing words
  out = open(file, 'w')
  resources = open('../data/resources.csv')
  resources.readline() # header
  for line in csv.reader(resources):
    if line[_projectid2] in good_projects:
      text = line[item_name]
      for x in text.strip().lower().replace('&#8217;', '').split():
        word = x
        try:
          word = p.singular_noun(x)
        except Exception:
          pass
        if not word:
          word = x
        if random.random() < frac:
          out.write('%s ' % word)
Example 30
def t2(word):
    wtnfinal = []
    p = inflect.engine()
    wtnfinal.append(word)
    wtnfinal.append(p.ordinal(word))
    wtnfinal = getwords(word, wtnfinal)
    return wtnfinal
Example 31
import inflect

p = inflect.engine()

def num_let_count(roof):
	total_sum = 0
	for num in range(1, roof + 1):
		num_eng = p.number_to_words(num)
		num_eng = num_eng.replace('-', '').replace(' ', '')
		total_sum += len(num_eng)

	return total_sum

print(num_let_count(1000))
Example 32
import os
from cleanPony.core.entities import Entity, ValueObject
from stringcase import snakecase
from typing import List, Type, get_type_hints, Any, Set, Dict
from inflect import engine
from dataclasses import fields
from collections import defaultdict

inflector = engine()


class ActionGenerator:
    def __init__(self, entity_cls: Type[Entity], force_rewrite: bool = False):
        self._Entity = entity_cls
        self._force_rewrite = force_rewrite

    @property
    def entity_name(self) -> str:
        return self._Entity.__qualname__

    @property
    def import_path(self) -> str:
        return self._Entity.__module__

    def _pluralize(self, word: str):
        return inflector.plural(word)

    def _write_file(self, file_path: str, content: List[str]):
        if not self._force_rewrite and os.path.exists(file_path):
            raise FileExistsError
Example 33
# -*- coding: utf-8 -*-
""" Defines a variety of useful string operators and methods. """

import ast
import datetime
import logging
import re
import sys

from HTMLParser import HTMLParser
from encodings.aliases import aliases

# utilize the inflect engine if possible for pluralize and singularize
try:
    import inflect
    inflect_engine = inflect.engine()
except ImportError:
    inflect = None
    inflect_engine = None

# defines the different rules for pluralizing a word
PLURAL_RULES = [(re.compile('^goose$'), 'geese'),
                (re.compile('^software$'), 'software'),
                (re.compile('^(?P<single>.+)(=?(?P<suffix>s))$'), 'ses'),
                (re.compile('^(?P<single>.+)(=?(?P<suffix>y))$'), 'ies')]

CONSTANT_EVALS = {'true': True, 'false': False, 'null': None}

COMMON_TERMS = {
    'a', 'about', 'all', 'and', 'are', 'as', 'at', 'be', 'but', 'by',
    'can', 'cannot', 'could', "couldn't", 'do', 'did', "didn't", 'for', 'from',
Example 34
import inflect, time
t, p = time.time(), inflect.engine()
letterAccumulator = 0

def countLetters(n):

	counter = 0

	string = p.number_to_words(n)
	for i in range(0, len(string)):
		if string[i] != "," and string[i] != " " and string[i] != "-":
			counter += 1
	return counter

for i in range(1, 1001):
	letterAccumulator += countLetters(i)

print(letterAccumulator)

print(time.time() - t)
Example 35
class ReadonlyResources:
    """Super-class of ABC ``Resources`` and all read-only OLD resource views.
    RESTful CRUD(S) interface based on the Atom protocol:

    +-----------------+-------------+--------------------------+--------+
    | Purpose         | HTTP Method | Path                     | Method |
    +=================+=============+==========================+========+
    | Create new      | POST        | /<cllctn_name>           | create |
    +-----------------+-------------+--------------------------+--------+
    | Create data     | GET         | /<cllctn_name>/new       | new    |
    +-----------------+-------------+--------------------------+--------+
    | Read all        | GET         | /<cllctn_name>           | index  |
    +-----------------+-------------+--------------------------+--------+
    | Read specific   | GET         | /<cllctn_name>/<id>      | show   |
    +-----------------+-------------+--------------------------+--------+
    | Update specific | PUT         | /<cllctn_name>/<id>      | update |
    +-----------------+-------------+--------------------------+--------+
    | Update data     | GET         | /<cllctn_name>/<id>/edit | edit   |
    +-----------------+-------------+--------------------------+--------+
    | Delete specific | DELETE      | /<cllctn_name>/<id>      | delete |
    +-----------------+-------------+--------------------------+--------+
    | Search          | SEARCH      | /<cllctn_name>           | search |
    +-----------------+-------------+--------------------------+--------+

    Note: the create, new, update, edit, and delete actions are all exposed via
    the REST API; however, they invariably return 404 responses.
    """

    inflect_p = inflect.engine()
    inflect_p.classical()

    def __init__(self, request):
        self.request = request
        self._db = None
        self._logged_in_user = None
        self._query_builder = None
        self.primary_key = 'id'
        # Names
        if not getattr(self, 'collection_name', None):
            self.collection_name = self.__class__.__name__.lower()
        if not getattr(self, 'hmn_collection_name', None):
            self.hmn_collection_name = self.collection_name
        if not getattr(self, 'member_name', None):
            self.member_name = self.inflect_p.singular_noun(
                self.collection_name)
        if not getattr(self, 'hmn_member_name', None):
            self.hmn_member_name = self.member_name
        if not getattr(self, 'model_name', None):
            self.model_name = self.member_name.capitalize()
        self.schema_cls_name = self.model_name + 'Schema'
        # Classes
        if not getattr(self, 'model_cls', None):
            self.model_cls = getattr(old_models, self.model_name)
        self.schema_cls = getattr(old_schemata, self.schema_cls_name, None)

    @property
    def db(self):
        if not self._db:
            self._db = DBUtils(self.request.dbsession,
                               self.request.registry.settings)
        return self._db

    @property
    def query_builder(self):
        if not self._query_builder:
            self._query_builder = SQLAQueryBuilder(
                self.request.dbsession,
                model_name=self.model_name,
                primary_key=self.primary_key,
                settings=self.request.registry.settings)
        return self._query_builder

    @property
    def logged_in_user(self):
        if not self._logged_in_user:
            user_dict = self.request.session['user']
            self._logged_in_user = self.request.dbsession.query(
                old_models.User).get(user_dict['id'])
        return self._logged_in_user

    ###########################################################################
    # Public CRUD(S) Methods
    ###########################################################################

    def create(self):
        self.request.response.status_int = 404
        LOGGER.warning('Failed attempt to create a read-only %s',
                       self.hmn_member_name)
        return READONLY_RSLT

    def new(self):
        self.request.response.status_int = 404
        LOGGER.warning('Failed attempt to get data for creating a read-only %s',
                       self.hmn_member_name)
        return READONLY_RSLT

    def index(self):
        """Get all resources.

        - URL: ``GET /<resource_collection_name>`` with optional query string
          parameters for ordering and pagination.

        :returns: a JSON-serialized array of resources objects.
        """
        LOGGER.info('Attempting to read all %s', self.hmn_collection_name)
        query = self._eagerload_model(
            self.request.dbsession.query(self.model_cls))
        get_params = dict(self.request.GET)
        try:
            query = self.add_order_by(query, get_params)
            query = self._filter_query(query)
            result = add_pagination(query, get_params)
        except Invalid as error:
            self.request.response.status_int = 400
            errors = error.unpack_errors()
            LOGGER.warning('Attempt to read all %s resulted in an error(s): %s',
                           self.hmn_collection_name, errors)
            return {'errors': errors}
        headers_ctl = self._headers_control(result)
        if headers_ctl is not False:
            return headers_ctl
        LOGGER.info('Reading all %s', self.hmn_collection_name)
        return result

    def show(self):
        """Return a resource, given its id.
        :URL: ``GET /<resource_collection_name>/<id>``
        :param str id: the ``id`` value of the resource to be returned.
        :returns: a resource model object.
        """
        LOGGER.info('Attempting to read a single %s', self.hmn_member_name)
        resource_model, id_ = self._model_from_id(eager=True)
        if not resource_model:
            self.request.response.status_int = 404
            msg = self._rsrc_not_exist(id_)
            LOGGER.warning(msg)
            return {'error': msg}
        if self._model_access_unauth(resource_model) is not False:
            self.request.response.status_int = 403
            LOGGER.warning(UNAUTHORIZED_MSG)
            return UNAUTHORIZED_MSG
        LOGGER.info('Reading a single %s', self.hmn_member_name)
        if dict(self.request.GET).get('minimal'):
            return minimal_model(resource_model)
        return self._get_show_dict(resource_model)

    def update(self):
        self.request.response.status_int = 404
        LOGGER.warning('Failed attempt to update a read-only %s',
                       self.hmn_member_name)
        return READONLY_RSLT

    def edit(self):
        self.request.response.status_int = 404
        LOGGER.warning('Failed attempt to get data for updating a read-only %s',
                       self.hmn_member_name)
        return READONLY_RSLT

    def delete(self):
        self.request.response.status_int = 404
        LOGGER.warning('Failed attempt to delete a read-only %s',
                       self.hmn_member_name)
        return READONLY_RSLT

    def search(self):
        """Return the list of resources matching the input JSON query.

        - URL: ``SEARCH /<resource_collection_name>`` (or ``POST
          /<resource_collection_name>/search``)
        - request body: A JSON object of the form::

              {"query": {"filter": [ ... ], "order_by": [ ... ]},
               "paginator": { ... }}

          where the ``order_by`` and ``paginator`` attributes are optional.
        """
        LOGGER.info('Attempting to search over %s', self.hmn_collection_name)
        try:
            python_search_params = json.loads(
                self.request.body.decode(self.request.charset))
        except ValueError:
            self.request.response.status_int = 400
            LOGGER.warning('Request body was not valid JSON')
            return JSONDecodeErrorResponse
        try:
            sqla_query = self.query_builder.get_SQLA_query(
                python_search_params.get('query'))
        except (OLDSearchParseError, Invalid) as error:
            self.request.response.status_int = 400
            errors = error.unpack_errors()
            LOGGER.warning(
                'Attempt to search over all %s resulted in an error(s): %s',
                self.hmn_collection_name, errors)
            return {'errors': errors}
        # Might be better to catch (OperationalError, AttributeError,
        # InvalidRequestError, RuntimeError):
        except Exception as error:  # FIX: too general exception
            LOGGER.warning('%s\'s filter expression (%s) raised an unexpected'
                           ' exception: %s.',
                           h.get_user_full_name(self.request.session['user']),
                           self.request.body, error)
            self.request.response.status_int = 400
            return {'error': 'The specified search parameters generated an'
                             ' invalid database query'}
        query = self._eagerload_model(sqla_query)
        query = self._filter_query(query)
        try:
            ret = add_pagination(query, python_search_params.get('paginator'))
        except OperationalError:
            self.request.response.status_int = 400
            msg = ('The specified search parameters generated an invalid'
                   ' database query')
            LOGGER.warning(msg)
            return {'error': msg}
        except Invalid as error:  # For paginator schema errors.
            self.request.response.status_int = 400
            errors = error.unpack_errors()
            LOGGER.warning(
                'Attempt to search over all %s resulted in an error(s): %s',
                self.hmn_collection_name, errors)
            return {'errors': errors}
        else:
            LOGGER.info('Successful search over %s', self.hmn_collection_name)
            return ret

    def new_search(self):
        """Return the data necessary to search over this type of resource.

        - URL: ``GET /<resource_collection_name>/new_search``

        :returns: a JSON object with a ``search_parameters`` attribute which
         resolves to an object with attributes ``attributes`` and ``relations``.
        """
        LOGGER.info('Returning search parameters for %s', self.hmn_member_name)
        return {'search_parameters':
                self.query_builder.get_search_parameters()}

    ###########################################################################
    # Private Methods for Override: redefine in views for custom behaviour
    ###########################################################################

    def _get_show_dict(self, resource_model):
        """Return the model as a dict for the return value of a successful show
        request. This is indirected so that resources like collections can
        override and do special things.
        """
        return resource_model.get_dict()

    def _get_create_dict(self, resource_model):
        return self._get_show_dict(resource_model)

    def _get_edit_dict(self, resource_model):
        return self._get_show_dict(resource_model)

    def _get_update_dict(self, resource_model):
        return self._get_create_dict(resource_model)

    def _eagerload_model(self, query_obj):
        """Override this in a subclass with model-specific eager loading."""
        return get_eagerloader(self.model_name)(query_obj)

    def _filter_query(self, query_obj):
        """Override this in a subclass with model-specific query filtering.
        E.g., in the forms view::
            >>> return h.filter_restricted_models(self.model_name, query_obj)
        """
        return query_obj

    def _headers_control(self, result):
        """Take actions based on header values and/or modify headers. If
        something other than ``False`` is returned, that will be the response.
        Useful for Last-Modified/If-Modified-Since caching, e.g., in ``index``
        method of Forms view.
        """
        return False

    def _update_unauth(self, resource_model):
        """Return ``True`` if update of the resource model cannot proceed."""
        return self._model_access_unauth(resource_model)

    def _update_unauth_msg_obj(self):
        """Return the dict that will be returned when ``self._update_unauth()``
        returns ``True``.
        """
        return UNAUTHORIZED_MSG

    def _model_access_unauth(self, resource_model):
        """Implement resource/model-specific access controls based on
        (un-)restricted(-ness) of the current logged in user and the resource
        in question. Return something other than ``False`` to trigger a 403
        response.
        """
        return False

    def _model_from_id(self, eager=False):
        """Return a particular model instance (and the id value), given the
        model id supplied in the URL path.
        """
        id_ = int(self.request.matchdict['id'])
        if eager:
            return (
                self._eagerload_model(
                    self.request.dbsession.query(self.model_cls)).get(id_),
                id_)
        return self.request.dbsession.query(self.model_cls).get(id_), id_

    ###########################################################################
    # Utilities
    ###########################################################################

    def _filter_restricted_models(self, query):
        user = self.logged_in_user
        if self.db.user_is_unrestricted(user):
            return query
        return _filter_restricted_models_from_query(self.model_name, query,
                                                    user)

    def _rsrc_not_exist(self, id_):
        return 'There is no %s with %s %s' % (self.hmn_member_name,
                                              self.primary_key, id_)

    def add_order_by(self, query, order_by_params, query_builder=None):
        """Add an ORDER BY clause to the query using the get_SQLA_order_by
        method of the instance's query_builder (if possible) or using a default
        ORDER BY <self.primary_key> ASC.
        """
        if not query_builder:
            query_builder = self.query_builder
        if (order_by_params and order_by_params.get('order_by_model') and
                order_by_params.get('order_by_attribute') and
                order_by_params.get('order_by_direction')):
            order_by_params = old_schemata.OrderBySchema.to_python(
                order_by_params)
            order_by_params = [
                order_by_params['order_by_model'],
                order_by_params['order_by_attribute'],
                order_by_params['order_by_direction']
            ]
            order_by_expression = query_builder.get_SQLA_order_by(
                order_by_params, self.primary_key)
            query_builder.clear_errors()
            return query.order_by(order_by_expression)
        model_ = getattr(old_models, query_builder.model_name)
        return query.order_by(asc(getattr(model_, self.primary_key)))
Example 36
def ingredient_quantities(recipe, new_ingredients_list, units_list_dict):

    # -------------------------------- Preprocessing Recipe Dataset

    id_ingredients = {}
    id_ingredients[recipe["id"]] = []

    j = 0

    for ingredient_text in recipe["ingredients"]:

        id_ingredients[recipe["id"]].append({
            "id":
            j,
            "ingredient": (ingredient_text["text"]).lower()
        })

        j = j + 1

    # -------------------------------- Extracting Ingredients
    '''
    ingredients_count = {}

    if ingredient_counting:

        for ingredient_vocab in new_ingredients_list:

            ingredients_count[ingredient_vocab] = 0
    '''

    # Dictionary with the ingredients (from the vocabulary of ingredients) and the number of occurrences in recipes.

    new_id_ingredients_tokenized_position = {}

    p = inflect.engine()

    new_id_ingredients_tokenized_position[recipe["id"]] = []

    k = 0

    for ingredient_text in id_ingredients[recipe["id"]]:

        for ingredient_vocab in new_ingredients_list:

            if (re.search(r"\b" + re.escape(ingredient_vocab) + r"\b",
                          ingredient_text["ingredient"])
                    or re.search(
                        r"\b" + p.plural(re.escape(ingredient_vocab)) + r"\b",
                        ingredient_text["ingredient"])) is not None:

                new_id_ingredients_tokenized_position[recipe["id"]].append({
                    "id":
                    k,
                    "ingredient":
                    ingredient_vocab
                })
        k = k + 1
    '''
                if ingredient_counting:
                    ingredients_count[new_ingredients_list[j]] = ingredients_count[new_ingredients_list[j]] + 1
                    print(ingredients_count)
    '''
    # -------------------------------- Extracting Units and Quantities
    '''
    ingrs_quants_units = {}

    value = new_id_ingredients_tokenized_position[recipe["id"]]

    ingrs_quants_units[recipe["id"]] = []

    for value2 in value:

        for i in list(units_list_dict.keys()):

            if (re.search(r"\b" + re.escape(i) + r"\b", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"])) is not None:

                if re.search(r"[1-9][0-9][0-9]", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"]):

                    ingrs_quants_units[recipe["id"]].append({"ingredient": value2["ingredient"], "quantity": float(re.search(r"[1-9][0-9][0-9]", ((id_ingredients[recipe["id"]][value2["id"]])["ingredient"])).group()),"unit": i, "quantity (g)": float(re.search(r"[1-9][0-9][0-9]",((id_ingredients[recipe["id"]][value2["id"]])["ingredient"])).group()) * int(units_list_dict[i])})

                    break

                elif re.search(r"[1-9]/[1-9]", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"]):

                    ingrs_quants_units[recipe["id"]].append({"ingredient": value2["ingredient"], "quantity": int(((re.search(r"[1-9]/[1-9]",(id_ingredients[recipe["id"]][value2["id"]])["ingredient"]).group()).split("/"))[0]) / int(((re.search(r"[1-9]/[1-9]", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"]).group()).split("/"))[1]), "unit": i, "quantity (g)": (int(((re.search(r"[1-9]/[1-9]",(id_ingredients[recipe["id"]][value2["id"]])["ingredient"]).group()).split("/"))[0]) / int(((re.search(r"[1-9]/[1-9]", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"]).group()).split("/"))[1])) * int(units_list_dict[i])})

                    break

                elif re.search(r"[1-9][0-9]", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"]):

                    ingrs_quants_units[recipe["id"]].append({"ingredient": value2["ingredient"], "quantity": float(re.search(r"[0-9][0-9]", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"]).group()), "unit": i, "quantity (g)": float(re.search(r"[0-9][0-9]",(id_ingredients[recipe["id"]][value2["id"]])["ingredient"]).group()) * int(units_list_dict[i])})

                    break

                else:

                    ingrs_quants_units[recipe["id"]].append({"ingredient": value2["ingredient"], "quantity": float(units_list_dict[i]), "unit": i, "quantity (g)": int(units_list_dict[i])})

                    break

            elif i == len(list(units_list_dict.keys())) - 1:

                if re.search(r"[1-9][0-9][0-9]", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"]):

                    ingrs_quants_units[recipe["id"]].append({"ingredient": value2["ingredient"], "quantity": 200, "unit": value2["ingredient"], "quantity (g)": 200})

                elif re.search(r"[1-9]/[1-9]", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"]):

                    ingrs_quants_units[recipe["id"]].append({"ingredient": value2["ingredient"], "quantity": int(((re.search(r"[1-9]/[1-9]", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"]).group()).split("/"))[0]) / int(((re.search(r"[1-9]/[1-9]", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"]).group()).split("/"))[1]), "unit": value2["ingredient"],"quantity (g)": int(((re.search(r"[1-9]/[1-9]", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"]).group()).split("/"))[0]) / int(((re.search(r"[1-9]/[1-9]", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"]).group()).split("/"))[1]) * 200})

                elif re.search(r"[1-9][0-9]", (id_ingredients[recipe["id"]][value2["id"]])["ingredient"]):

                    ingrs_quants_units[recipe["id"]].append({"ingredient": value2["ingredient"], "quantity": 200, "unit": value2["ingredient"], "quantity (g)": 200})

                else:

                    ingrs_quants_units[recipe["id"]].append({"ingredient": value2["ingredient"], "quantity": 200, "unit": value2["ingredient"], "quantity (g)": 200})
    '''
    #return ingrs_quants_units[recipe["id"]]

    return new_id_ingredients_tokenized_position
Example 37
"""
:mod:`Quantulum` unit and entity loading functions.
"""

import json
import os
from builtins import open
from collections import defaultdict

import inflect

from ... import load
from . import lang

TOPDIR = os.path.dirname(__file__) or "."

PLURALS = inflect.engine()


###############################################################################
def pluralize(singular, count=None):
    return PLURALS.plural(singular, count)


def number_to_words(number):
    return PLURALS.number_to_words(number)


###############################################################################
def build_common_words():
    # Read raw 4 letter file
    path = os.path.join(TOPDIR, "common-units.txt")
Example 38
def plural(column):
    return engine().plural(sub(r'_id$', '', column)) or column
Example 39
def singular(table):
    inflected = engine().singular_noun(table)
    return inflected and (inflected + '_id') or table
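Assuming "from re import sub" and "from inflect import engine" at module level, these two helpers map a foreign-key column name to a table name and back; the or/and fallbacks keep the input unchanged when inflection fails:

from re import sub
from inflect import engine

print(engine().plural(sub(r'_id$', '', 'user_id')))  # -> "users", so plural('user_id') == "users"
print(engine().singular_noun('users'))               # -> "user",  so singular('users') == "user_id"
print(engine().singular_noun('user'))                # -> False,   so singular('user') == "user"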
Example 40
#Storyteller/bots/inputparser.py
import logging
import inflect

logger = logging.getLogger()
inflector = inflect.engine()

class InputParser(object):
    def __init__(self):
        pass

    def has_word(self, input_text, word):
        keywords = [word]
        return any(keyword in input_text for keyword in keywords)

    def has_number(self, input_text, number):
        word_num = inflector.number_to_words(number)
        keywords = [number, word_num]

        return any(keyword in input_text for keyword in keywords)

Example 41
    def __init__(self):
        """
		report_data: Dict containing all required data to populate the report,
			 regardless of report template.
		report_options: Other report options unrelated to the actual data
			 such as color palettes, etc.
		"""
        self.tech_report_path = './report_templates/tech_report/TechnicalReport/'
        self.tech_report_build = self.tech_report_path + 'dist/'

        self.jinja_env = Environment(
            loader=FileSystemLoader(os.path.abspath(constants.HTML_BASE_PATH)))

        self.REPORT_TEMP_FILEPATH = "report.html.temp"
        self.REPORT_DETAILS_FILEPATH = "./config/report_details.yml"
        self.REPORT_RENDERED_FILEPATH = "./report_pdfs/"
        self.OS_RELEASES_FILEPATH = './config/os_releases.yml'
        # #Filepath for the report template - to be changed according to the child class
        # self.REPORT_TEMPLATE_FILEPATH='BPG_Report_Template.html'
        self.REPORT_TEMPLATE_FILEPATH = "report_base.html.j2"

        self.report_data = {}
        self.report_options = {}
        """
		Loads all other details needed such as customer's name, etc
		These values are to be placed on the report_options dict when generating
		the report.
		"""
        self.yaml_parser = YAMLParser()
        report_details = self.yaml_parser.load_file(
            self.REPORT_DETAILS_FILEPATH)

        #Sanity check against empty file
        if report_details:
            p = inflect.engine()
            gen_day = p.ordinal(datetime.datetime.now().day)

            expire_date = datetime.datetime.now() + datetime.timedelta(days=45)
            exp_day = p.ordinal(expire_date.day)
            self.report_options.update({
                "include_customer_logo":
                self._find_customer_logo(
                    report_details.setdefault("customer_name", "")),
                "partner_name":
                report_details.setdefault("partner_name", ""),
                "partner_contacts":
                report_details.setdefault("partner_contacts", {}),
                "trend_contacts":
                report_details.setdefault("trend_contacts", {}),
                "generation_date":
                datetime.datetime.now(),
                "generation_month":
                datetime.datetime.now().strftime("%B"),
                "generation_day":
                gen_day,
                "expiration_date":
                expire_date,
                "expiration_day":
                exp_day,
                "expiration_month":
                expire_date.strftime("%B"),
            })

        else:
            print("WARNING: No {} found! exiting...".format(
                self.REPORT_DETAILS_FILEPATH))
            exit(1)
Example 42
    COMPARISONS:    classical
          compare compare_nouns compare_verbs compare_adjs
    ARTICLES:   classical inflect num a an
    NUMERICAL:      ordinal number_to_words
    USER_DEFINED:   defnoun defverb defadj defa defan

Exceptions:
 UnknownClassicalModeError
 BadNumValueError
 BadChunkingOptionError
 NumOutOfRangeError
 BadUserDefinedPatternError
 BadRcFileError
 BadGenderError
"""
plurals = inflect.engine()


def fetch(word):
    """given a single word, fix plural and singular - returning graph picks"""
    pass


def get_siblings(word):
    """Return a set of assocated words theough noun and plural etc... extraction.
    """

    # func for each word type.
    defs = (
        'plural',
        'plural_noun',
Example 43
def p17():

    n = 1000

    return len("".join(map(inflect.engine().number_to_words,
                           range(1, n + 1))).replace(' ', '').replace('-', ''))
Example 44
import inflect
import re

count = 0
num = inflect.engine()

for i in range(1, 1001):
    count += len(re.sub('[ -]', '', num.number_to_words(i)))
print(count)
Example 45
 def create_inflect_engine(self):
     if self.noinflect:
         return _DummyInflectEngine()
     else:
         import inflect
         return inflect.engine()
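The _DummyInflectEngine referenced above is not shown in this snippet; a hypothetical minimal stand-in would simply pass words through unchanged when inflection is disabled:

class _DummyInflectEngine:
    """Hypothetical no-op replacement for inflect.engine()."""

    def plural(self, noun):
        return noun  # leave the word unchanged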
Example 46
def numToWords(arg):

    p = inflect.engine()
    return p.number_to_words(arg)
Example 47
def inflecting(local_step_number):  # convert an int step number to an ordinal string
    p = inflect.engine()
    str_step = p.ordinal(local_step_number) + " step"
    return str_step  # e.g. input 1 -> "1st step"
Example 48
#!/usr/bin/env python3

import os
import inflect

pluralizer = inflect.engine()

# Loop through files
for filename in os.listdir():
    # Skip irrelevant files
    if not filename.endswith('txt'):
        continue

    # Divide into important parts and find category name
    institution_type, institution = filename.split('_')
    category = pluralizer.plural(institution_type)

    if not os.path.exists(category):
        os.mkdir(category)

    final_location = os.path.join(category, institution)
    os.rename(filename, final_location)
Example 49
class TestView(TestCase):
    """Base test view for testing OLD Pyramid views.

    Example usage within a method::

        res = self.app.get(
            '/forms', status=200,
            extra_environ={'test.authentication.role': 'viewer'})
    """
    # pylint: disable=too-many-instance-attributes

    inflect_p = inflect.engine()
    inflect_p.classical()
    old_name = SETTINGS['old_name']
    old_name_2 = old_name_2_tests

    @classmethod
    def tearDownClass(cls):
        Session.close_all()
        if Session2:
            Session2.close_all()

    def setUp(self):
        self.default_setup()
        self.create_db()

    def tearDown(self, **kwargs):
        """Clean up after a test."""
        db = DBUtils(self.dbsession, self.settings)
        clear_all_tables = kwargs.get('clear_all_tables', False)
        dirs_to_clear = kwargs.get('dirs_to_clear', [])
        dirs_to_destroy = kwargs.get('dirs_to_destroy', [])
        if clear_all_tables:
            db.clear_all_tables(['language'])
        else:
            self.clear_all_models(self.dbsession)
        for dir_path in dirs_to_clear:
            h.clear_directory_of_files(getattr(self, dir_path))
        for dir_name in dirs_to_destroy:
            h.destroy_all_directories(self.inflect_p.plural(dir_name),
                                      self.settings)
        if self.Session2:
            db = DBUtils(self.dbsession, self.settings2)
            clear_all_tables = kwargs.get('clear_all_tables', False)
            dirs_to_clear = kwargs.get('dirs_to_clear', [])
            dirs_to_destroy = kwargs.get('dirs_to_destroy', [])
            if clear_all_tables:
                db.clear_all_tables(['language'])
            else:
                self.clear_all_models(self.dbsession2)
            for attr_name in dirs_to_clear:
                dir_name = attr_name.replace('_path', '')
                dir_path = h.get_old_directory_path(dir_name,
                                                    settings=self.settings2)
                h.clear_directory_of_files(dir_path)
            for dir_name in dirs_to_destroy:
                h.destroy_all_directories(self.inflect_p.plural(dir_name),
                                          self.settings2)
        self.tear_down_dbsession()

    def tear_down_dbsession(self):
        self.dbsession.commit()
        Session.remove()
        if Session2:
            self.dbsession2.commit()
            Session2.remove()

    def default_setup(self):
        self.settings = SETTINGS
        self.settings2 = SETTINGS_2
        self.config = CONFIG
        self.Session = Session
        self.Session2 = Session2
        self.dbsession = Session()
        if Session2:
            self.dbsession2 = Session2()
        self.app = APP
        setup_logging('config.ini#loggers')
        self._setattrs()
        self._setcreateparams()

    def create_db(self):
        # Create the database tables
        h.create_OLD_directories(self.settings)
        languages = omb.get_language_objects(self.settings['here'],
                                             truncated=True)
        administrator = omb.generate_default_administrator(
            settings=self.settings)
        contributor = omb.generate_default_contributor(settings=self.settings)
        viewer = omb.generate_default_viewer(settings=self.settings)
        Base.metadata.drop_all(bind=self.dbsession.bind, checkfirst=True)
        self.dbsession.commit()
        Base.metadata.create_all(bind=self.dbsession.bind, checkfirst=True)
        self.dbsession.add_all(languages +
                               [administrator, contributor, viewer])
        self.dbsession.commit()
        if self.Session2:
            h.create_OLD_directories(self.settings2)
            languages = omb.get_language_objects(self.settings2['here'],
                                                 truncated=True)
            administrator = omb.generate_default_administrator(
                settings=self.settings2)
            contributor = omb.generate_default_contributor(
                settings=self.settings2)
            viewer = omb.generate_default_viewer(settings=self.settings2)
            Base.metadata.drop_all(bind=self.dbsession2.bind, checkfirst=True)
            self.dbsession2.commit()
            Base.metadata.create_all(bind=self.dbsession2.bind,
                                     checkfirst=True)
            self.dbsession2.add_all(languages +
                                    [administrator, contributor, viewer])
            self.dbsession2.commit()

    @staticmethod
    def clear_all_models(dbsession, retain=('Language', )):
        """Convenience function for removing all OLD models from the database.
        The retain parameter is a list of model names that should not be
        cleared.
        """
        for model_name in get_model_names():
            if model_name not in retain:
                model = getattr(old_models, model_name)
                if not issubclass(model, old_models.Model):
                    continue
                models = dbsession.query(model).all()
                for model in models:
                    dbsession.delete(model)
        dbsession.commit()

    def _setattrs(self):
        """Set a whole bunch of instance attributes that are useful in tests."""
        self.extra_environ_view = {'test.authentication.role': 'viewer'}
        self.extra_environ_contrib = {
            'test.authentication.role': 'contributor'
        }
        self.extra_environ_admin = {
            'test.authentication.role': 'administrator'
        }
        self.extra_environ_view_appset = {
            'test.authentication.role': 'viewer',
            'test.application_settings': True
        }
        self.extra_environ_contrib_appset = {
            'test.authentication.role': 'contributor',
            'test.application_settings': True
        }
        self.extra_environ_admin_appset = {
            'test.authentication.role': 'administrator',
            'test.application_settings': True
        }
        self.json_headers = {'Content-Type': 'application/json'}
        self.here = self.settings['here']
        self.files_path = h.get_old_directory_path('files',
                                                   settings=self.settings)
        self.reduced_files_path = h.get_old_directory_path(
            'reduced_files', settings=self.settings)
        self.test_files_path = os.path.join(self.here, 'old', 'tests', 'data',
                                            'files')
        self.create_reduced_size_file_copies = asbool(
            self.settings.get('create_reduced_size_file_copies', False))
        self.preferred_lossy_audio_format = self.settings.get(
            'preferred_lossy_audio_format', 'ogg')
        self.corpora_path = h.get_old_directory_path('corpora',
                                                     settings=self.settings)
        self.test_datasets_path = os.path.join(self.here, 'old', 'tests',
                                               'data', 'datasets')
        self.test_scripts_path = os.path.join(self.here, 'old', 'tests',
                                              'scripts')
        self.loremipsum100_path = os.path.join(self.test_datasets_path,
                                               'loremipsum_100.txt')
        self.loremipsum1000_path = os.path.join(self.test_datasets_path,
                                                'loremipsum_1000.txt')
        self.loremipsum10000_path = os.path.join(self.test_datasets_path,
                                                 'loremipsum_10000.txt')
        self.users_path = h.get_old_directory_path('users',
                                                   settings=self.settings)
        self.morphologies_path = h.get_old_directory_path(
            'morphologies', settings=self.settings)
        self.morphological_parsers_path = h.get_old_directory_path(
            'morphological_parsers', settings=self.settings)
        self.phonologies_path = h.get_old_directory_path(
            'phonologies', settings=self.settings)
        self.morpheme_language_models_path = h.get_old_directory_path(
            'morpheme_language_models', settings=self.settings)
        self.test_phonologies_path = os.path.join(self.here, 'old', 'tests',
                                                  'data', 'phonologies')
        self.test_phonology_script_path = os.path.join(
            self.test_phonologies_path, 'test_phonology.script')
        self.test_malformed_phonology_script_path = os.path.join(
            self.test_phonologies_path, 'test_phonology_malformed.script')
        self.test_phonology_no_phonology_script_path = os.path.join(
            self.test_phonologies_path, 'test_phonology_malformed.script')
        self.test_medium_phonology_script_path = os.path.join(
            self.test_phonologies_path, 'test_phonology_medium.script')
        self.test_large_phonology_script_path = os.path.join(
            self.test_phonologies_path, 'test_phonology_large.script')
        self.test_phonology_testless_script_path = os.path.join(
            self.test_phonologies_path, 'test_phonology_no_tests.script')
        self.test_morphologies_path = os.path.join(self.here, 'old', 'tests',
                                                   'data', 'morphologies')
        self.test_morphophonologies_path = os.path.join(
            self.here, 'old', 'tests', 'data', 'morphophonologies')

    def _setcreateparams(self):
        """Set a whole bunch of ``_create_params``-suffixed instance attributes
        that are useful for creating new resources within tests.
        """
        self.application_settings_create_params = {
            'object_language_name': '',
            'object_language_id': '',
            'metalanguage_name': '',
            'metalanguage_id': '',
            'metalanguage_inventory': '',
            'orthographic_validation':
            'None',  # Value should be one of ['None', 'Warning', 'Error']
            'narrow_phonetic_inventory': '',
            'narrow_phonetic_validation': 'None',
            'broad_phonetic_inventory': '',
            'broad_phonetic_validation': 'None',
            'morpheme_break_is_orthographic': '',
            'morpheme_break_validation': 'None',
            'phonemic_inventory': '',
            'morpheme_delimiters': '',
            'punctuation': '',
            'grammaticalities': '',
            'unrestricted_users': [],  # A list of user ids
            'storage_orthography': '',  # An orthography id
            'input_orthography': '',  # An orthography id
            'output_orthography': ''  # An orthography id
        }
        self.collection_create_params = {
            'title': '',
            'type': '',
            'url': '',
            'description': '',
            'markup_language': '',
            'contents': '',
            'speaker': '',
            'source': '',
            'elicitor': '',
            'enterer': '',
            'date_elicited': '',
            'tags': [],
            'files': []
        }
        self.corpus_create_params = {
            'name': '',
            'description': '',
            'content': '',
            'form_search': '',
            'tags': []
        }
        self.file_create_params = {
            'name': '',
            'description': '',
            'date_elicited': '',  # mm/dd/yyyy
            'elicitor': '',
            'speaker': '',
            'utterance_type': '',
            'embedded_file_markup': '',
            'embedded_file_password': '',
            'tags': [],
            'forms': [],
            'file': ''  # file data Base64 encoded
        }
        self.file_create_params_base64 = {
            'filename': '',  # Will be filtered out on update requests
            'description': '',
            'date_elicited': '',  # mm/dd/yyyy
            'elicitor': '',
            'speaker': '',
            'utterance_type': '',
            'tags': [],
            'forms': [],
            'base64_encoded_file': ''  # file data Base64 encoded; will be
            # filtered out on update requests
        }
        self.file_create_params_MPFD = {
            'filename': '',  # Will be filtered out on update requests
            'description': '',
            'date_elicited': '',  # mm/dd/yyyy
            'elicitor': '',
            'speaker': '',
            'utterance_type': '',
            'tags-0': '',
            'forms-0': ''
        }
        self.file_create_params_sub_ref = {
            'parent_file': '',
            'name': '',
            'start': '',
            'end': '',
            'description': '',
            'date_elicited': '',  # mm/dd/yyyy
            'elicitor': '',
            'speaker': '',
            'utterance_type': '',
            'tags': [],
            'forms': []
        }
        self.file_create_params_ext_host = {
            'url': '',
            'name': '',
            'password': '',
            'MIME_type': '',
            'description': '',
            'date_elicited': '',  # mm/dd/yyyy
            'elicitor': '',
            'speaker': '',
            'utterance_type': '',
            'tags': [],
            'forms': []
        }
        self.form_create_params = {
            'transcription': '',
            'phonetic_transcription': '',
            'narrow_phonetic_transcription': '',
            'morpheme_break': '',
            'grammaticality': '',
            'morpheme_gloss': '',
            'translations': [],
            'comments': '',
            'speaker_comments': '',
            'elicitation_method': '',
            'tags': [],
            'syntactic_category': '',
            'speaker': '',
            'elicitor': '',
            'verifier': '',
            'source': '',
            'status': 'tested',
            'date_elicited': '',  # mm/dd/yyyy
            'syntax': '',
            'semantics': ''
        }
        self.form_search_create_params = {
            'name': '',
            'search': '',
            'description': '',
            'searcher': ''
        }
        self.morpheme_language_model_create_params = {
            'name': '',
            'description': '',
            'corpus': '',
            'vocabulary_morphology': '',
            'toolkit': '',
            'order': '',
            'smoothing': '',
            'categorial': False
        }
        self.morphology_create_params = {
            'name': '',
            'description': '',
            'lexicon_corpus': '',
            'rules_corpus': '',
            'script_type': 'lexc',
            'extract_morphemes_from_rules_corpus': False,
            'rules': '',
            'rich_upper': True,
            'rich_lower': False,
            'include_unknowns': False
        }
        self.morphological_parser_create_params = {
            'name': '',
            'phonology': '',
            'morphology': '',
            'language_model': '',
            'description': ''
        }
        self.orthography_create_params = {
            'name': '',
            'orthography': '',
            'lowercase': False,
            'initial_glottal_stops': True
        }
        self.page_create_params = {
            'name': '',
            'heading': '',
            'markup_language': '',
            'content': '',
            'html': ''
        }
        self.phonology_create_params = {
            'name': '',
            'description': '',
            'script': ''
        }
        self.source_create_params = {
            'file': '',
            'type': '',
            'key': '',
            'address': '',
            'annote': '',
            'author': '',
            'booktitle': '',
            'chapter': '',
            'crossref': '',
            'edition': '',
            'editor': '',
            'howpublished': '',
            'institution': '',
            'journal': '',
            'key_field': '',
            'month': '',
            'note': '',
            'number': '',
            'organization': '',
            'pages': '',
            'publisher': '',
            'school': '',
            'series': '',
            'title': '',
            'type_field': '',
            'url': '',
            'volume': '',
            'year': '',
            'affiliation': '',
            'abstract': '',
            'contents': '',
            'copyright': '',
            'ISBN': '',
            'ISSN': '',
            'keywords': '',
            'language': '',
            'location': '',
            'LCCN': '',
            'mrnumber': '',
            'price': '',
            'size': '',
        }
        self.speaker_create_params = {
            'first_name': '',
            'last_name': '',
            'page_content': '',
            'dialect': 'dialect',
            'markup_language': 'reStructuredText'
        }
        self.syntactic_category_create_params = {
            'name': '',
            'type': '',
            'description': ''
        }
        self.user_create_params = {
            'username': '',
            'password': '',
            'password_confirm': '',
            'first_name': '',
            'last_name': '',
            'email': '',
            'affiliation': '',
            'role': '',
            'markup_language': '',
            'page_content': '',
            'input_orthography': None,
            'output_orthography': None
        }

    @staticmethod
    def poll(requester, changing_attr, changing_attr_originally, log,
             **kwargs):
        """Poll a resource by calling ``requester`` until the value of
        ``changing_attr`` no longer matches ``changing_attr_originally``.
        """
        wait = kwargs.get('wait', 2)
        vocal = kwargs.get('vocal', True)
        task_descr = kwargs.get('task_descr', 'task')
        seconds_elapsed = 0
        while True:
            response = requester()
            resp = response.json_body
            if changing_attr_originally != resp[changing_attr]:
                if vocal:
                    log.debug('Task terminated')
                break
            else:
                if vocal:
                    log.debug('Waiting for %s to terminate: %s', task_descr,
                              h.human_readable_seconds(seconds_elapsed))
            sleep(wait)
            seconds_elapsed = seconds_elapsed + wait
        return resp
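
poll() takes a zero-argument callable and re-issues it until one attribute of the JSON response changes. A usage sketch within a test method; the resource path, attribute name, original value, and logger are hypothetical:

        requester = lambda: self.app.get(
            '/morphologies/1', headers=self.json_headers,
            extra_environ=self.extra_environ_admin)
        resp = self.poll(requester, 'compile_attempt',
                         original_compile_attempt, log,
                         wait=2, task_descr='morphology compile')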
Example no. 50
0
import discord
import requests
import json
import inflect

ENGINE = inflect.engine()


class Member:
    def __init__(self, data):
        # 'data' instead of 'dict' to avoid shadowing the built-in
        self.global_score = data["global_score"]
        self.name = data["name"]
        self.stars = data["stars"]
        self.last_star_ts = data["last_star_ts"]
        self.completion_day_level = data["completion_day_level"]
        self.id = data["id"]
        self.local_score = data["local_score"]

    async def mention(self, client):
        cursor = client.db.cursor()
        cursor.execute("SELECT user_id FROM link_data WHERE aoc_id=?",
                       (self.id, ))
        user_id = cursor.fetchone()
        if user_id:
            user = await client.fetch_user(int(user_id[0]))
            return user.mention
        else:
            return None


class Leaderboard:
Example no. 51
0
def build_figure_py(
    trace_node,
    base_package,
    base_classname,
    fig_classname,
    data_validator,
    layout_validator,
    frame_validator,
    subplot_nodes,
    layout_array_nodes,
):
    """

    Parameters
    ----------
    trace_node : PlotlyNode
        Root trace node (the node that is the parent of all of the
        individual trace nodes like bar, scatter, etc.)
    base_package : str
        Package that the figure's superclass resides in
    base_classname : str
        Name of the figure's superclass
    fig_classname : str
        Name of the Figure class to be generated
    data_validator : BaseDataValidator
        DataValidator instance
    layout_validator : CompoundValidator
        LayoutValidator instance
    frame_validator : CompoundArrayValidator
        FrameValidator instance
    subplot_nodes : list of str
        List of names of all of the layout subplot properties
    layout_array_nodes : list of PlotlyNode
        List of array nodes under layout that can be positioned using xref/yref

    Returns
    -------
    str
        Source code for figure class definition
    """

    # Initialize source code buffer
    # -----------------------------
    buffer = StringIO()

    # Get list of trace type nodes
    # ----------------------------
    trace_nodes = trace_node.child_compound_datatypes

    # Write imports
    # -------------
    # ### Import base class ###
    buffer.write(f"from plotly.{base_package} import {base_classname}\n")

    # ### Import trace graph_obj classes / layout ###
    trace_types_csv = ", ".join([n.name_datatype_class
                                 for n in trace_nodes] + ["layout as _layout"])
    buffer.write(f"from plotly.graph_objs import ({trace_types_csv})\n")

    # Write class definition
    # ----------------------
    buffer.write(f"""

class {fig_classname}({base_classname}):\n""")

    # ### Constructor ###
    # Build constructor description strings
    data_description = reindent_validator_description(data_validator, 8)
    layout_description = reindent_validator_description(layout_validator, 8)
    frames_description = reindent_validator_description(frame_validator, 8)

    buffer.write(f"""
    def __init__(self, data=None, layout=None,
                 frames=None, skip_invalid=False, **kwargs):
        \"\"\"
        Create a new {fig_classname} instance
        
        Parameters
        ----------
        data
            {data_description}
            
        layout
            {layout_description}
            
        frames
            {frames_description}
            
        skip_invalid: bool
            If True, invalid properties in the figure specification will be
            skipped silently. If False (default) invalid properties in the
            figure specification will result in a ValueError

        Raises
        ------
        ValueError
            if a property in the specification of data, layout, or frames
            is invalid AND skip_invalid is False
        \"\"\"
        super({fig_classname}, self).__init__(data, layout,
                                              frames, skip_invalid,
                                              **kwargs)
    """)

    # ### add_trace methods for each trace type ###
    for trace_node in trace_nodes:

        include_secondary_y = bool([
            d for d in trace_node.child_datatypes if d.name_property == "yaxis"
        ])

        # #### Function signature ####
        buffer.write(f"""
    def add_{trace_node.plotly_name}(self""")

        # #### Function params ####
        param_extras = ["row", "col"]
        if include_secondary_y:
            param_extras.append("secondary_y")
        add_constructor_params(buffer,
                               trace_node.child_datatypes,
                               append_extras=param_extras)

        # #### Docstring ####
        header = f"Add a new {trace_node.name_datatype_class} trace"

        doc_extras = [
            (
                "row : int or None (default)",
                "Subplot row index (starting from 1) for the trace to be "
                "added. Only valid if figure was created using "
                "`plotly.tools.make_subplots`",
            ),
            (
                "col : int or None (default)",
                "Subplot col index (starting from 1) for the trace to be "
                "added. Only valid if figure was created using "
                "`plotly.tools.make_subplots`",
            ),
        ]

        if include_secondary_y:
            doc_extras.append((
                "secondary_y: boolean or None (default None)",
                """\
            If True, associate this trace with the secondary y-axis of the
            subplot at the specified row and col. Only valid if all of the
            following conditions are satisfied:
              * The figure was created using `plotly.subplots.make_subplots`.
              * The row and col arguments are not None
              * The subplot at the specified row and col has type xy
                (which is the default) and secondary_y True.  These
                properties are specified in the specs argument to
                make_subplots. See the make_subplots docstring for more info.\
""",
            ))

        add_docstring(
            buffer,
            trace_node,
            header,
            append_extras=doc_extras,
            return_type=fig_classname,
        )

        # #### Function body ####
        buffer.write(f"""
        new_trace = {trace_node.name_datatype_class}(
        """)

        for i, subtype_node in enumerate(trace_node.child_datatypes):
            subtype_prop_name = subtype_node.name_property
            buffer.write(f"""
                {subtype_prop_name}={subtype_prop_name},""")

        buffer.write(f"""
            **kwargs)""")

        if include_secondary_y:
            secondary_y_kwarg = ", secondary_y=secondary_y"
        else:
            secondary_y_kwarg = ""

        buffer.write(f"""
        return self.add_trace(
            new_trace, row=row, col=col{secondary_y_kwarg})""")

    # update layout subplots
    # ----------------------
    inflect_eng = inflect.engine()
    for subplot_node in subplot_nodes:
        singular_name = subplot_node.name_property
        plural_name = inflect_eng.plural_noun(singular_name)

        if singular_name == "yaxis":
            secondary_y_1 = ", secondary_y=None"
            secondary_y_2 = ", secondary_y=secondary_y"
            secondary_y_docstring = f"""
        secondary_y: boolean or None (default None)
            * If True, only select yaxis objects associated with the secondary
              y-axis of the subplot.
            * If False, only select yaxis objects associated with the primary
              y-axis of the subplot.
            * If None (the default), do not filter yaxis objects based on
              a secondary y-axis condition. 
            
            To select yaxis objects by secondary y-axis, the Figure must
            have been created using plotly.subplots.make_subplots. See
            the docstring for the specs argument to make_subplots for more
            info on creating subplots with secondary y-axes."""
        else:
            secondary_y_1 = ""
            secondary_y_2 = ""
            secondary_y_docstring = ""

        buffer.write(f"""

    def select_{plural_name}(
            self, selector=None, row=None, col=None{secondary_y_1}):
        \"\"\"
        Select {singular_name} subplot objects from a particular subplot cell
        and/or {singular_name} subplot objects that satisfy custom selection
        criteria.

        Parameters
        ----------
        selector: dict or None (default None)
            Dict to use as selection criteria.
            {singular_name} objects will be selected if they contain
            properties corresponding to all of the dictionary's keys, with
            values that exactly match the supplied values. If None
            (the default), all {singular_name} objects are selected.
        row, col: int or None (default None)
            Subplot row and column index of {singular_name} objects to select.
            To select {singular_name} objects by row and column, the Figure
            must have been created using plotly.subplots.make_subplots.
            If None (the default), all {singular_name} objects are selected.\
{secondary_y_docstring}
        Returns
        -------
        generator
            Generator that iterates through all of the {singular_name}
            objects that satisfy all of the specified selection criteria
        \"\"\"

        return self._select_layout_subplots_by_prefix(
            '{singular_name}', selector, row, col{secondary_y_2})

    def for_each_{singular_name}(
            self, fn, selector=None, row=None, col=None{secondary_y_1}):
        \"\"\"
        Apply a function to all {singular_name} objects that satisfy the
        specified selection criteria
        
        Parameters
        ----------
        fn:
            Function that inputs a single {singular_name} object.
        selector: dict or None (default None)
            Dict to use as selection criteria.
            {singular_name} objects will be selected if they contain
            properties corresponding to all of the dictionary's keys, with
            values that exactly match the supplied values. If None
            (the default), all {singular_name} objects are selected.
        row, col: int or None (default None)
            Subplot row and column index of {singular_name} objects to select.
            To select {singular_name} objects by row and column, the Figure
            must have been created using plotly.subplots.make_subplots.
            If None (the default), all {singular_name} objects are selected.\
{secondary_y_docstring}
        Returns
        -------
        self
            Returns the Figure object that the method was called on
        \"\"\"
        for obj in self.select_{plural_name}(
                selector=selector, row=row, col=col{secondary_y_2}):
            fn(obj)

        return self

    def update_{plural_name}(
            self,
            patch=None,
            selector=None,
            overwrite=False,
            row=None, col=None{secondary_y_1},
            **kwargs):
        \"\"\"
        Perform a property update operation on all {singular_name} objects
        that satisfy the specified selection criteria
        
        Parameters
        ----------
        patch: dict
            Dictionary of property updates to be applied to all
            {singular_name} objects that satisfy the selection criteria.
        selector: dict or None (default None)
            Dict to use as selection criteria.
            {singular_name} objects will be selected if they contain
            properties corresponding to all of the dictionary's keys, with
            values that exactly match the supplied values. If None
            (the default), all {singular_name} objects are selected.
        overwrite: bool
            If True, overwrite existing properties. If False, apply updates
            to existing properties recursively, preserving existing
            properties that are not specified in the update operation.
        row, col: int or None (default None)
            Subplot row and column index of {singular_name} objects to select.
            To select {singular_name} objects by row and column, the Figure
            must have been created using plotly.subplots.make_subplots.
            If None (the default), all {singular_name} objects are selected.\
{secondary_y_docstring}
        **kwargs
            Additional property updates to apply to each selected
            {singular_name} object. If a property is specified in
            both patch and in **kwargs then the one in **kwargs
            takes precedence.
        Returns
        -------
        self
            Returns the Figure object that the method was called on
        \"\"\"
        for obj in self.select_{plural_name}(
                selector=selector, row=row, col=col{secondary_y_2}):
            obj.update(patch, overwrite=overwrite, **kwargs)

        return self""")

    # update annotations/shapes/images
    # --------------------------------
    for node in layout_array_nodes:
        singular_name = node.plotly_name
        plural_name = node.name_property

        if singular_name == "image":
            # Rename image to layout_image to avoid conflict with an image trace
            method_prefix = "layout_"
        else:
            method_prefix = ""

        buffer.write(f"""
    def select_{method_prefix}{plural_name}(
        self, selector=None, row=None, col=None, secondary_y=None
    ):
        \"\"\"
        Select {plural_name} from a particular subplot cell and/or {plural_name}
        that satisfy custom selection criteria.

        Parameters
        ----------
        selector: dict or None (default None)
            Dict to use as selection criteria.
            Annotations will be selected if they contain properties corresponding
            to all of the dictionary's keys, with values that exactly match
            the supplied values. If None (the default), all {plural_name} are
            selected.
        row, col: int or None (default None)
            Subplot row and column index of {plural_name} to select.
            To select {plural_name} by row and column, the Figure must have been
            created using plotly.subplots.make_subplots.  To select only those
            {singular_name} that are in paper coordinates, set row and col to the
            string 'paper'.  If None (the default), all {plural_name} are selected.
        secondary_y: boolean or None (default None)
            * If True, only select {plural_name} associated with the secondary
              y-axis of the subplot.
            * If False, only select {plural_name} associated with the primary
              y-axis of the subplot.
            * If None (the default), do not filter {plural_name} based on secondary
              y-axis.

            To select {plural_name} by secondary y-axis, the Figure must have been
            created using plotly.subplots.make_subplots. See the docstring
            for the specs argument to make_subplots for more info on
            creating subplots with secondary y-axes.
        Returns
        -------
        generator
            Generator that iterates through all of the {plural_name} that satisfy
            all of the specified selection criteria
        \"\"\"
        return self._select_annotations_like(
            "{plural_name}", selector=selector, row=row, col=col, secondary_y=secondary_y
        )

    def for_each_{method_prefix}{singular_name}(
        self, fn, selector=None, row=None, col=None, secondary_y=None
    ):
        \"\"\"
        Apply a function to all {plural_name} that satisfy the specified selection
        criteria

        Parameters
        ----------
        fn:
            Function that inputs a single {singular_name} object.
        selector: dict or None (default None)
            Dict to use as selection criteria.
            Traces will be selected if they contain properties corresponding
            to all of the dictionary's keys, with values that exactly match
            the supplied values. If None (the default), all {plural_name} are
            selected.
        row, col: int or None (default None)
            Subplot row and column index of {plural_name} to select.
            To select {plural_name} by row and column, the Figure must have been
            created using plotly.subplots.make_subplots.  To select only those
            {plural_name} that are in paper coordinates, set row and col to the
            string 'paper'.  If None (the default), all {plural_name} are selected.
        secondary_y: boolean or None (default None)
            * If True, only select {plural_name} associated with the secondary
              y-axis of the subplot.
            * If False, only select {plural_name} associated with the primary
              y-axis of the subplot.
            * If None (the default), do not filter {plural_name} based on secondary
              y-axis.

            To select {plural_name} by secondary y-axis, the Figure must have been
            created using plotly.subplots.make_subplots. See the docstring
            for the specs argument to make_subplots for more info on
            creating subplots with secondary y-axes.
        Returns
        -------
        self
            Returns the Figure object that the method was called on
        \"\"\"
        for obj in self._select_annotations_like(
            prop='{plural_name}',
            selector=selector,
            row=row,
            col=col,
            secondary_y=secondary_y,
        ):
            fn(obj)

        return self

    def update_{method_prefix}{plural_name}(
        self,
        patch=None,
        selector=None,
        row=None,
        col=None,
        secondary_y=None,
        **kwargs
    ):
        \"\"\"
        Perform a property update operation on all {plural_name} that satisfy the
        specified selection criteria

        Parameters
        ----------
        patch: dict or None (default None)
            Dictionary of property updates to be applied to all {plural_name} that
            satisfy the selection criteria.
        selector: dict or None (default None)
            Dict to use as selection criteria.
            Traces will be selected if they contain properties corresponding
            to all of the dictionary's keys, with values that exactly match
            the supplied values. If None (the default), all {plural_name} are
            selected.
        row, col: int or None (default None)
            Subplot row and column index of {plural_name} to select.
            To select {plural_name} by row and column, the Figure must have been
            created using plotly.subplots.make_subplots.  To select only those
            {singular_name} that are in paper coordinates, set row and col to the
            string 'paper'.  If None (the default), all {plural_name} are selected.
        secondary_y: boolean or None (default None)
            * If True, only select {plural_name} associated with the secondary
              y-axis of the subplot.
            * If False, only select {plural_name} associated with the primary
              y-axis of the subplot.
            * If None (the default), do not filter {plural_name} based on secondary
              y-axis.

            To select {plural_name} by secondary y-axis, the Figure must have been
            created using plotly.subplots.make_subplots. See the docstring
            for the specs argument to make_subplots for more info on
            creating subplots with secondary y-axes.
        **kwargs
            Additional property updates to apply to each selected {singular_name}. If
            a property is specified in both patch and in **kwargs then the
            one in **kwargs takes precedence.

        Returns
        -------
        self
            Returns the Figure object that the method was called on
        \"\"\"
        for obj in self._select_annotations_like(
            prop='{plural_name}',
            selector=selector,
            row=row,
            col=col,
            secondary_y=secondary_y,
        ):
            obj.update(patch, **kwargs)

        return self
""")
        # Add layout array items
        buffer.write(f"""
    def add_{method_prefix}{singular_name}(self""")
        add_constructor_params(
            buffer,
            node.child_datatypes,
            prepend_extras=["arg"],
            append_extras=["row", "col", "secondary_y"],
        )

        prepend_extras = [(
            "arg",
            f"instance of {node.name_datatype_class} or dict with "
            "compatible properties",
        )]
        append_extras = [
            ("row", f"Subplot row for {singular_name}"),
            ("col", f"Subplot column for {singular_name}"),
            ("secondary_y",
             f"Whether to add {singular_name} to secondary y-axis"),
        ]
        add_docstring(
            buffer,
            node,
            header=
            f"Create and add a new {singular_name} to the figure's layout",
            prepend_extras=prepend_extras,
            append_extras=append_extras,
            return_type=fig_classname,
        )

        # #### Function body ####
        buffer.write(f"""
        new_obj = _layout.{node.name_datatype_class}(arg,
            """)

        for i, subtype_node in enumerate(node.child_datatypes):
            subtype_prop_name = subtype_node.name_property
            buffer.write(f"""
                {subtype_prop_name}={subtype_prop_name},""")

        buffer.write("""**kwargs)""")

        buffer.write(f"""
        return self._add_annotation_like(
            '{singular_name}',
            '{plural_name}',
            new_obj,
            row=row,
            col=col,
            secondary_y=secondary_y,
        )""")

    # Return source string
    # --------------------
    buffer.write("\n")
    return buffer.getvalue()
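
The select_/for_each_/update_ method names generated above come from plural_noun, e.g. 'xaxis' yields 'xaxes' and hence select_xaxes and update_xaxes. A quick check of the pluralizations, assuming a standard inflect installation:

import inflect

eng = inflect.engine()
for name in ('xaxis', 'yaxis', 'scene', 'polar'):
    print(name, '->', eng.plural_noun(name))
# xaxis -> xaxes, yaxis -> yaxes, scene -> scenes, polar -> polars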
Example no. 52
0
    def _estimate_word_timings_fasttext(self, text, total_duration_frames):
        """
        This is a convenience function that enables the model to work with plaintext
        transcriptions in place of a time-annotated JSON file from Google Speech-to-Text.

        It does the following two things:
        
            1) Encodes the given text into word vectors using FastText embedding
            
            2) Assuming 10 FPS and the given length, estimates the following features for each frame:
                - elapsed time since the beginning of the current word 
                - remaining time from the current word
                - the duration of the current word
                - the progress as the ratio 'elapsed_time / duration'
                - the pronunciation speed of the current word (number of syllables per decisecond)
               so that the word length is proportional to the number of syllables in it.
        
        Args: 
            text:  the plaintext transcription
            total_duration_frames:  the total duration of the speech (in frames)

        Returns:
            feature_array:  a numpy array of shape (305, n_frames) that contains the text features
        """
        print("Estimating word timings with FastText using syllable count.")
        print(f'\nInput text:\n"{text}"')

        # The fillers will be encoded with the same vector
        filler_encoding = self.embedding["ah"]
        fillers = ["eh", "ah", "like", "kind of"]
        delimiters = ['.', '!', '?']
        n_syllables = []

        # The transcription might contain numbers - we will use the 'inflect' library
        # to convert those to words e.g. 456 to "four hundred fifty-six"
        num_converter = inflect.engine()

        words = []
        for word in text.split():
            # Remove the delimiters (str.replace returns a new string,
            # so the result must be assigned back)
            for d in delimiters:
                word = word.replace(d, '')

            # If the current word is not a number, we just append it to the list of words (and calculate the syllable count too)
            if not word.isnumeric() and not word[:-1].isnumeric():
                # NOTE: we check word[:-1] because we want to interpret a string like "456," as a number too
                words.append(word)
                n_syllables.append(count_syllables(word))
            else:
                number_in_words = num_converter.number_to_words(word,
                                                                andword="")
                # Append each word in the number (e.g. "four hundred fifty-six") to the list of words
                for number_word in number_in_words.split():
                    words.append(number_word)
                    n_syllables.append(count_syllables(number_word))

        total_n_syllables = sum(n_syllables)
        elapsed_deciseconds = 0
        # Shape of (batch_size, frame_length, 305)
        feature_array = []

        for curr_word, word_n_syllables in zip(words, n_syllables):
            # The estimated word durations are proportional to the number of syllables in the word
            if word_n_syllables == 0:
                word_n_syllables = 1

            word_encoding = self.embedding[curr_word]
            # We take the ceiling to not lose information
            # (if the text was shorter than the audio because of rounding errors, then
            #  the audio would be cropped to the text's length)
            w_duration = ceil(total_duration_frames * word_n_syllables /
                              total_n_syllables)
            w_speed = word_n_syllables / w_duration if w_duration > 0 else 10  # Because 10 FPS
            w_start = elapsed_deciseconds
            w_end = w_start + w_duration

            # print("Word: {} | Duration: {} | #Syl: {} | time: {}-{}".format(curr_word, w_duration, word_n_syllables, w_start, w_end))

            while elapsed_deciseconds < w_end:
                elapsed_deciseconds += 1

                w_elapsed_time = elapsed_deciseconds - w_start
                w_remaining_time = w_duration - w_elapsed_time + 1
                w_progress = w_elapsed_time / w_duration

                frame_features = [
                    w_elapsed_time, w_remaining_time, w_duration, w_progress,
                    w_speed
                ]

                feature_array.append(list(word_encoding) + frame_features)

        return np.array(feature_array)
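
The andword="" argument suppresses the "and" that inflect inserts by default, matching the docstring's "four hundred fifty-six" example. A standalone illustration of the numeral-conversion step:

import inflect

num_converter = inflect.engine()
print(num_converter.number_to_words(456, andword=""))  # 'four hundred fifty-six'
print(num_converter.number_to_words(456))              # 'four hundred and fifty-six'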
Example no. 53
0
async def explain(ctx: MtgContext, *, thing: Optional[str]) -> None:
    """Answers for Frequently Asked Questions
`!explain`. Get a list of things the bot knows how to explain.
`!explain {thing}`. Print commonly needed explanation for 'thing'."""
    num_tournaments = inflect.engine().number_to_words(
        len(tournaments.all_series_info()))
    explanations: Dict[str, Tuple[str, Dict[str, str]]] = {
        'archetype': (
            """
            Archetypes are manually reviewed by a human on an irregular basis.
            Prior to that a deck will have either its assigned archetype on Gatherling (tournament decks), nothing, or a best-guess based on the most similar reviewed deck (league decks).
            If you want to help out let us know.
            """,
            {},
        ),
        'bugs': (
            'We keep track of cards that are bugged on Magic Online. We allow the playing of cards with known bugs in Penny Dreadful under certain conditions. See the full rules on the website.',
            {
                'Known Bugs List':
                fetcher.decksite_url('/bugs/'),
                'Tournament Rules':
                fetcher.decksite_url('/tournaments/#bugs'),
                'Bugged Cards Database':
                'https://github.com/PennyDreadfulMTG/modo-bugs/issues/',
            },
        ),
        'deckbuilding': (
            """
            The best way to build decks is to use a search engine that supports Penny Dreadful legality (`f:pd`) like Scryfall.
            You can find Penny Dreadful decklists from tournaments, leagues and elsewhere at pennydreadfulmagic.com.
            """,
            {
                'Scryfall': 'https://scryfall.com/',
                'Latest Decks': fetcher.decksite_url('/'),
                'Legal Cards List': 'http://pdmtgo.com/legal_cards.txt',
            },
        ),
        'decklists': (
            """
            You can find Penny Dreadful decklists from tournaments, leagues and elsewhere at pennydreadfulmagic.com
            """,
            {
                'Latest Decks': fetcher.decksite_url('/'),
            },
        ),
        'doorprize': (
            "The door prize is 1 tik credit with Cardhoarder, awarded to one randomly-selected player that completes the Swiss rounds but doesn't make top 8.",
            {},
        ),
        'language': (
            """
            To change the language you see the site in use the language switcher in the top-left hand corner (desktop only) or follow the link below for English.
            """,
            {
                'PDM in English': fetcher.decksite_url('/?locale=en'),
            },
        ),
        'league': (
            """
            Leagues last for roughly a month. You may enter any number of times but only one deck at a time.
            You play five matches per run. You can join the league at any time.
            To find a game sign up and then create a game in Constructed, Specialty, Freeform Tournament Practice with "Penny Dreadful League" as the comment.
            Top 8 finishers on each month's league leaderboard win credit with MTGO Traders.
            When you complete a five match league run for the first time ever you will get 1 tik credit with MTGO Traders (at the end of the month).
            """,
            {
                'More Info': fetcher.decksite_url('/league/'),
                'Sign Up': fetcher.decksite_url('/signup/'),
                'Current League': fetcher.decksite_url('/league/current/'),
            },
        ),
        'netdecking': (
            """
            Netdecking is both allowed and encouraged. Most deck creators are happy when others play their decks.
            You can get a glimpse of the meta via the archetypes link below. Sort by win percent to find the best-performing decks.
            """,
            {
                'Archetypes': fetcher.decksite_url('/archetypes/'),
                'All Decklists': fetcher.decksite_url('/decks/'),
            },
        ),
        'noshow': (
            """
            If your opponent does not join your game please @-message them on Discord and contact them on Magic Online.
            If you haven't heard from them by 10 minutes after the start of the round let the Tournament Organizer know.
            You will receive a 2-0 win and your opponent will be dropped from the competition.
            """,
            {},
        ),
        'onegame': (
            """
            If your opponent concedes or times out before the match completes, PDBot will not report automatically.
            If you feel enough of a match was played you may manually report 2-x where x is the number of games your opponent won.
            """,
            {
                'Report': fetcher.decksite_url('/report/'),
            },
        ),
        'playing': (
            """
            To get a match go to Constructed, Specialty, Freeform Tournament Practice on MTGO and create a match with "Penny Dreadful" in the comments.
            """,
            {},
        ),
        'prices': (
            f"""
            The price output contains current price.
            If the price is low enough it will show season-low and season-high also.
            If the card has been {card_price.MAX_PRICE_TEXT} or less at any point this season it will also include the amount of time (as a percentage) the card has spent at {card_price.MAX_PRICE_TEXT} or below this week, month and season.
            """,
            {},
        ),
        'prizes': (
            """
            Gatherling tournaments pay prizes to the Top 8 in Cardhoarder credit.
            This credit will appear when you trade with one of their bots on Magic Online.
            One player not making Top 8 but playing all the Swiss rounds will be randomly allocated the door prize.
            Prizes are credited once a week usually on the Friday or Saturday following the tournament but may sometimes take longer.
            Note: Penny Dreadful Tuesdays are player-supported, get your tix from a trade with swiftwarkite2 on MTGO.
            """,
            {
                'More Info': fetcher.decksite_url('/tournaments/'),
            },
        ),
        'promos': (
            """
            """,
            {},
        ),
        'replay': (
            """
            You can play the same person a second time on your league run as long as they have started a new run. The same two runs cannot play each other twice.
            """,
            {},
        ),
        'reporting': (
            """
            """,
            {},
        ),
        'retire': (
            'To retire from a league run message PDBot on MTGO with `!retire`. If you have authenticated with Discord on pennydreadfulmagic.com you can say `!retire` on Discord or retire on the website.',
            {
                'Retire': fetcher.decksite_url('/retire/'),
            },
        ),
        'rotation': (
            f"""
            Legality is set a week after the release of a Standard-legal set on Magic Online.
            Prices are checked every hour for a week from the set release. Anything {card_price.MAX_PRICE_TEXT} or less for half or more of all checks is legal for the season.
            Any version of a card on the legal cards list is legal.
            """,
            {},
        ),
        'spectating': (
            """
            Spectating tournament and league matches is allowed and encouraged.
            Please do not write anything in chat except to call PDBot's `!record` command to find out the current score in games.
            """,
            {},
        ),
        'tournament': (
            """
            We have {num_tournaments} free-to-enter weekly tournaments that award trade credit prizes from Cardhoarder.
            They are hosted on gatherling.com along with a lot of other player-run Magic Online events.
            """.format(num_tournaments=num_tournaments),
            {
                'More Info': fetcher.decksite_url('/tournaments/'),
                'Sign Up': 'https://gatherling.com/',
            },
        ),
        'username': (
            """
            Please change your Discord username to include your MTGO username so we can know who you are.
            To change, right-click your username.
            This will not affect any other Discord channel.
            """,
            {},
        ),
        'verification': (
            """
            Gatherling verification is currently broken.
            It no longer does anything except put a green tick by your name anyway.
            """,
            {},
        ),
    }
    reporting_explanations: Dict[str, Tuple[str, Dict[str, str]]] = {
        'tournament': (
            """
            For tournaments PDBot is information-only, *both* players must report near the top of Player CP (or follow the link at the top of any Gatherling page).
            """,
            {
                'Gatherling': 'https://gatherling.com/player.php',
            },
        ),
        'league': (
            """
            If PDBot reports your league match in #league in Discord you don't need to do anything. If not, either player can report.
            """,
            {
                'League Report': fetcher.decksite_url('/report/'),
            },
        ),
    }
    keys = sorted(explanations.keys())
    explanations['drop'] = explanations['retire']
    explanations['legality'] = explanations['rotation']
    explanations['spectate'] = explanations['spectating']
    explanations['tournaments'] = explanations['tournament']
    explanations['watching'] = explanations['spectating']
    explanations['verify'] = explanations['verification']
    # Strip a trailing 's' so 'leagues' matches 'league' and similar, without making the output of `!explain` unnecessarily plural.
    if thing is None:
        thing = ''
    word = thing.lower().replace(' ', '').rstrip('s')
    if len(word) > 0:
        for k in explanations:
            if k.startswith(word):
                word = k
    try:
        if word == 'reporting':
            if is_tournament_channel(ctx.channel):
                explanation = reporting_explanations['tournament']
            else:
                explanation = reporting_explanations['league']
        elif word == 'promos':
            explanation = promo_explanation()
        else:
            explanation = explanations[word]

        s = '{text}\n'.format(text=textwrap.dedent(explanation[0]))
    except KeyError:
        usage = 'I can explain any of these things: {things}'.format(
            things=', '.join(sorted(keys)))
        await ctx.send(usage)
        return
    for k in sorted(explanation[1].keys()):
        s += '{k}: <{v}>\n'.format(k=k, v=explanation[1][k])
    await ctx.send(s)
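
The lookup normalizes the query (lowercase, spaces removed, trailing 's' stripped) and then expands any matching prefix to a full key, so `!explain tourn` resolves to 'tournament'. A minimal standalone sketch of that resolution logic, with a toy explanations dict:

explanations = {'tournament': '...', 'league': '...', 'rotation': '...'}

def resolve(thing):
    word = (thing or '').lower().replace(' ', '').rstrip('s')
    if word:
        for k in explanations:
            if k.startswith(word):
                word = k
    return word

print(resolve('Leagues'))  # 'league'
print(resolve('tourn'))    # 'tournament'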
Example no. 54
0
def __init__(self, store_name):
    '''Connect to an instance of TinyDB.'''
    self.db = TinyDB(store_name)
    self.pluralize = inflect.engine().plural_noun
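
Binding plural_noun once and storing it as self.pluralize lets the store derive collection names on demand. A hedged usage sketch; the Store wrapper class is an assumption mirroring the __init__ above:

import inflect
from tinydb import TinyDB

class Store:
    def __init__(self, store_name):
        '''Connect to an instance of TinyDB.'''
        self.db = TinyDB(store_name)
        self.pluralize = inflect.engine().plural_noun

store = Store('db.json')
print(store.pluralize('category'))  # 'categories'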
Example no. 55
0
    def set_or_extend_module_description_and_final_stats(self, module: Module,
                                                         module_sentences: ModuleSentences = None,
                                                         description: str = None,
                                                         additional_postfix_terms_list: List[str] = None,
                                                         additional_postfix_final_word: str = None,
                                                         use_single_form: bool = False):
        """set description text and stats for a specific module

        if previous data is present in the specified module, the provided description and the stats are merged with the
        existing ones

        Args:
            module (Module): the description module to update
            module_sentences (ModuleSentences): optional - module sentences object from which to take the description
                and stats
            description (str): optional - description text to be added
            additional_postfix_terms_list (List[str]): optional - list of terms to be merged and added as postfix to the
                description
            additional_postfix_final_word: optional - word to be added at the end of the postfix (automatically
                converted to plural if the list of terms has more than one element)
            use_single_form (bool): whether to use a single form for the final word without transforming it to plural
        """
        desc = ""
        if module_sentences:
            desc = module_sentences.get_description()
            self.stats.trimmed = self.stats.trimmed or any([sent.trimmed for sent in module_sentences.sentences])
        elif description:
            inflect_engine = inflect.engine()
            desc = description
            if additional_postfix_terms_list and len(additional_postfix_terms_list) > 0:
                desc += " " + concatenate_words_with_oxford_comma(additional_postfix_terms_list) + " " + \
                        (additional_postfix_final_word if use_single_form or len(additional_postfix_terms_list) == 1
                         else inflect_engine.plural_noun(additional_postfix_final_word))
        if desc:
            if self.description and self.description != self.gene_name:
                self.description = self.description[0:-1] + "; " + desc + "."
            else:
                if not self.add_gene_name or not self.gene_name:
                    desc = desc[0].upper() + desc[1:]
                self.description = self.gene_name + " " + desc + "." if self.add_gene_name else desc + "."
            if module == Module.GO_FUNCTION:
                self.go_function_description = self._concatenate_description(desc, self.go_function_description)
                self.stats.set_final_go_ids_f = self._get_merged_ids(module_sentences.get_ids(experimental_only=False),
                                                                     self.stats.set_final_go_ids_f)
                self.stats.set_final_experimental_go_ids_f = self._get_merged_ids(module_sentences.get_ids(
                    experimental_only=True), self.stats.set_final_experimental_go_ids_f)
            elif module == Module.GO_PROCESS:
                self.go_process_description = self._concatenate_description(desc, self.go_process_description)
                self.stats.set_final_go_ids_p = self._get_merged_ids(module_sentences.get_ids(experimental_only=False),
                                                                     self.stats.set_final_go_ids_p)
                self.stats.set_final_experimental_go_ids_p = self._get_merged_ids(module_sentences.get_ids(
                    experimental_only=True), self.stats.set_final_experimental_go_ids_p)
            elif module == Module.GO_COMPONENT:
                self.go_component_description = self._concatenate_description(desc, self.go_component_description)
                self.stats.set_final_go_ids_c = self._get_merged_ids(module_sentences.get_ids(experimental_only=False),
                                                                     self.stats.set_final_go_ids_c)
                self.stats.set_final_experimental_go_ids_c = self._get_merged_ids(module_sentences.get_ids(
                    experimental_only=True), self.stats.set_final_experimental_go_ids_c)
            elif module == Module.EXPRESSION:
                self.tissue_expression_description = self._concatenate_description(desc, self.tissue_expression_description)
                self.stats.set_final_expression_ids = self._get_merged_ids(
                    module_sentences.get_ids(experimental_only=False), self.stats.set_final_expression_ids)
            elif module == Module.EXPRESSION_CLUSTER_GENE:
                self.gene_expression_cluster_description = self._concatenate_description(
                    desc, self.gene_expression_cluster_description)
            elif module == Module.EXPRESSION_CLUSTER_ANATOMY:
                self.anatomy_expression_cluster_description = self._concatenate_description(
                    desc, self.anatomy_expression_cluster_description)
            elif module == Module.EXPRESSION_CLUSTER_MOLECULE:
                self.molecule_expression_cluster_description = self._concatenate_description(
                    desc, self.molecule_expression_cluster_description)
            elif module == Module.DO_EXPERIMENTAL:
                self.do_experimental_description = self._concatenate_description(desc, self.do_experimental_description)
                self.stats.set_final_do_ids = self._get_merged_ids(module_sentences.get_ids(experimental_only=False),
                                                                   self.stats.set_final_do_ids)
            elif module == Module.DO_BIOMARKER:
                self.do_biomarker_description = self._concatenate_description(desc, self.do_biomarker_description)
                self.stats.set_final_do_ids = self._get_merged_ids(module_sentences.get_ids(experimental_only=False),
                                                                   self.stats.set_final_do_ids)
            elif module == Module.DO_ORTHOLOGY:
                self.do_orthology_description = self._concatenate_description(desc, self.do_orthology_description)
                self.stats.set_final_do_ids = self._get_merged_ids(module_sentences.get_ids(experimental_only=False),
                                                                   self.stats.set_final_do_ids)
            elif module == Module.SISTER_SP:
                self.sister_species_description = self._concatenate_description(desc, self.sister_species_description)
            elif module == Module.ORTHOLOGY:
                self.orthology_description = self._concatenate_description(desc, self.orthology_description)
            elif module == Module.INFO_POOR_HUMAN_FUNCTION:
                self.human_gene_function_description = self._concatenate_description(desc, self.human_gene_function_description)
            elif module == Module.PROTEIN_DOMAIN:
                self.protein_domain_description = self._concatenate_description(desc, self.protein_domain_description)
            # Multimodule fields
            if module in (Module.GO_FUNCTION, Module.GO_PROCESS, Module.GO_COMPONENT):
                self.go_description = self._merge_descriptions(
                    [self.go_function_description, self.go_process_description, self.go_component_description])
            if module in (Module.DO_EXPERIMENTAL, Module.DO_BIOMARKER, Module.DO_ORTHOLOGY):
                self.do_description = self._merge_descriptions(
                    [self.do_experimental_description, self.do_biomarker_description, self.do_orthology_description])
                self.stats.number_final_do_term_covering_multiple_initial_do_terms = self.do_description.count(
                    "(multiple)")
Example no. 56
0
    def post(self, request, format=None):
        try:
            postData = request.data
            imagePath = str(MEDIA_ROOT) + "images/" + str(
                dict(request.FILES)["imagePath"][0].name)
            numberOfCabins = int(postData["numberOfCabins"])
            cascPath = str(STATIC_URL) + str(
                "xml/haarcascade_frontalface_default.xml")
            faceCascade = cv2.CascadeClassifier(cascPath)
            image = cv2.imread(imagePath)
            height, width, channels = image.shape
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            print(height, width, channels)

            faces = faceCascade.detectMultiScale(
                gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30)
            )

            print("Found {0} faces!".format(len(faces)))
            print(faces)

            # Optionally draw rectangles around the detected faces:
            # for (x, y, w, h) in faces:
            #     cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # split the image width into equal horizontal slices, one per cabin
            cabinSpace = width // numberOfCabins

            cabinChunk = []
            for i in range(numberOfCabins):
                cabinChunk.append({
                    "cabinLocation": [cabinSpace * i, cabinSpace * (i + 1)],
                    "isCabinFull": True,
                    "cabinNumber": i,
                })

            numberToWords = inflect.engine().number_to_words
            cabinStatus = []
            for cabin in cabinChunk:
                start, end = cabin["cabinLocation"]
                # a cabin is occupied if any detected face starts inside its slice
                cabin["isCabinFull"] = any(start <= x < end for (x, y, w, h) in faces)
                state = "Full" if cabin["isCabinFull"] else "Empty"
                result = "Cabin No: " + str(cabin["cabinNumber"] + 1) + " is " + state + "."
                print(result)
                cabinStatus.append({
                    "cabinNumber": numberToWords(cabin["cabinNumber"] + 1),
                    "isCabinFull": cabin["isCabinFull"],
                    "result": result,
                })

            return Response({"success": True, "data": cabinStatus})
        except Exception as e:
            print(e)
            # report failure explicitly instead of success with an error payload
            return Response({"success": False, "data": str(e)})
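The response labels each cabin with its spelled-out number via inflect's number_to_words. A quick standalone check of that call, using nothing beyond the public inflect API:

import inflect

engine = inflect.engine()
print(engine.number_to_words(1))    # one
print(engine.number_to_words(21))   # twenty-one
print(engine.number_to_words("3"))  # three -- string input is accepted as well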
Example no. 57
0
 def test_swap_parts_of_speech(self):
     great_expectations_sample = ''.join([
         "It was then I began to understand that everything in the room had stopped, like the watch and the ",
         "clock, a long time ago. I noticed that Miss Havisham put down the jewel exactly on the spot from which ",
         "she had taken it up. As Estella dealt the cards, I glanced at the dressing-table again, and saw that the",
         " shoe upon it, once white, now yellow, had never been worn. I glanced down at the foot from which the ",
         "shoe was absent, and saw that the silk stocking on it, once white, now yellow, had been trodden ragged. ",
         "Without this arrest of everything, this standing still of all the pale decayed objects, not even the ",
         "withered bridal dress on the collapsed form could have looked so like grave-clothes, or the long veil ",
         "so like a shroud."
     ])  # a novel by Charles Dickens
     great_expectations_nouns = [
         'everything', 'room', 'watch', 'clock', 'time', 'jewel', 'spot',
         'cards', 'dressing', 'table', 'shoe', 'foot', 'silk', 'arrest',
         'objects', 'dress', 'form', 'grave', 'clothes', 'veil', 'shroud'
     ]
     great_expectations_adjectives = [
         'long', 'white', 'yellow', 'absent', 'pale', 'decayed', 'bridal'
     ]
     spacy_nlp = spacy.load('en_core_web_sm', disable=['ner'])
     spacy_nlp.remove_pipe("parser")
     tokenized_ge_sample = spacy_nlp(great_expectations_sample)
     great_expectations_pos_by_word_number = {}
     for i, token in enumerate(tokenized_ge_sample):
         if token.pos_ in ['ADJ', 'NOUN']:
             great_expectations_pos_by_word_number[i] = token.pos_
     shunned_house_sample = ''.join([
         "Yet after all, the sight was worse than I had dreaded. There are horrors beyond horrors, and this was one",
         " of those nuclei of all dreamable hideousness which the cosmos saves to blast an accursed and unhappy ",
         "few. Out of the fungus-ridden earth steamed up a vaporous corpse-light, yellow and diseased, which ",
         "bubbled and lapped to a gigantic height in vague outlines half human and half monstrous, through which I ",
         "could see the chimney and fireplace beyond. It was all eyes—wolfish and mocking—and the rugose insectoid ",
         "head dissolved at the top to a thin stream of mist which curled putridly about and finally vanished up ",
         "the chimney. I say that I saw this thing, but it is only in conscious retrospection that I ever ",
         "definitely traced its damnable approach to form. At the time, it was to me only a seething, dimly ",
         "phosphorescent cloud of fungous loathsomeness, enveloping and dissolving to an abhorrent plasticity the ",
         "one object on which all my attention was focused."
     ])  # a story by H.P. Lovecraft
     tokenized_shunned_house_sample = spacy_nlp(shunned_house_sample)
     shunned_house_pos_by_word_number = {}
     for i, token in enumerate(tokenized_shunned_house_sample):
         if token.pos_ in ['ADJ', 'NOUN']:
             shunned_house_pos_by_word_number[i] = token.pos_
     shunned_house_nouns = [
         'sight', 'horrors', 'nuclei', 'hideousness', 'cosmos', 'fungus',
         'earth', 'corpse', 'height', 'outlines', 'half', 'chimney',
         'fireplace', 'eyes', 'wolfish', 'mocking', 'head', 'top', 'stream',
         'mist', 'thing', 'retrospection', 'approach', 'time', 'cloud',
         'loathsomeness', 'enveloping', 'dissolving', 'abhorrent',
         'plasticity', 'object', 'attention'
     ]
     shunned_house_adjectives = [
         'worse', 'dreamable', 'accursed', 'unhappy', 'few', 'vaporous',
         'light', 'yellow', 'diseased', 'gigantic', 'vague', 'human',
         'monstrous', 'rugose', 'insectoid', 'thin', 'conscious',
         'damnable', 'seething', 'phosphorescent', 'fungous'
     ]
     # Just test swapping nouns and adjectives for now
     new_ge_sample, new_sh_sample = swap_parts_of_speech(
         great_expectations_sample, shunned_house_sample)
     print(
         "\n\nNouns and Adjectives Taken From Lovecraft and Swapped Into Great Expectations:"
     )
     print(new_ge_sample + "\n")
     print(
         "Nouns and Adjectives Taken From Great Expectations and Swapped Into Lovecraft:"
     )
     print(new_sh_sample)
     new_ge_doc, new_sh_doc = spacy_nlp(new_ge_sample), spacy_nlp(new_sh_sample)
     # Since the Dickens sample has fewer nouns and adjectives, all of its nouns
     # and adjectives should be replaced by Lovecraft's words
     inflector = inflect.engine()
     for i, token in enumerate(new_ge_doc):
         expected_pos = great_expectations_pos_by_word_number.get(i, None)
         if expected_pos == 'NOUN':
             self.assertTrue(
                 token.text in shunned_house_nouns
                 or inflector.plural(token.text) in shunned_house_nouns
                 or inflector.singular_noun(token.text) in shunned_house_nouns)
         elif expected_pos == 'ADJ':
             self.assertTrue(token.text in shunned_house_adjectives)
     for i, token in enumerate(new_sh_doc):
         expected_pos = shunned_house_pos_by_word_number.get(i, None)
         if expected_pos == 'ADJ':
             # Only 7 adjectives exist in the Dickens passage, so only that many substitutions can occur.
             self.assertTrue(token.text in great_expectations_adjectives
                             or i > 6)
         elif expected_pos == 'NOUN':
             # Only 21 nouns exist in the Dickens passage, so only that many substitutions can occur.
             self.assertTrue(
                 (token.text in great_expectations_nouns
                  or inflector.plural(token.text) in great_expectations_nouns
                  or inflector.singular_noun(token.text) in great_expectations_nouns)
                 or i > 20)
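The assertions above check a token against each word list in both grammatical numbers because inflect's singular_noun returns False when its argument is already singular. A small sketch of that pattern in isolation (same_noun is an illustrative helper, not part of the test):

import inflect

inflector = inflect.engine()


def same_noun(candidate, target):
    # match regardless of grammatical number; singular_noun may return False
    return target in (candidate,
                      inflector.plural(candidate),
                      inflector.singular_noun(candidate))


print(same_noun("horrors", "horror"))  # True: singular_noun("horrors") == "horror"
print(same_noun("veil", "veils"))      # True: plural("veil") == "veils"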
Example no. 58
0
import re
import inflect

_inflect = inflect.engine()

_time_re = re.compile(
    r"""\b
    ((0?[0-9])|(1[0-1])|(1[2-9])|(2[0-3]))    # hours
    :
    ([0-5][0-9])                              # minutes
    \s*(a\.m\.|am|pm|p\.m\.|a\.m|p\.m)?       # am/pm
    \b""", re.IGNORECASE | re.X)


def _expand_num(n: int) -> str:
    return _inflect.number_to_words(n)


def _expand_time_english(match: "re.Match") -> str:
    hour = int(match.group(1))
    past_noon = hour >= 12
    time = []
    if hour > 12:
        hour -= 12
    elif hour == 0:
        hour = 12
        past_noon = True
    time.append(_expand_num(hour))

    minute = int(match.group(6))
    if minute > 0:
        # NOTE: the source snippet is truncated at this point; the remainder of
        # this function is an assumed completion in the usual "oh five" style.
        if minute < 10:
            time.append("oh")
        time.append(_expand_num(minute))
    am_pm = match.group(7)
    if am_pm is None:
        time.append("p m" if past_noon else "a m")
    else:
        time.append(am_pm.replace(".", ""))
    return " ".join(time)
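For reference, a small driver showing how the pieces above fit together; expand_time_english is a plausible top-level wrapper (an assumption, since the source cuts off before any such function):

def expand_time_english(text: str) -> str:
    # substitute every matched time expression with its spelled-out form
    return re.sub(_time_re, _expand_time_english, text)


print(expand_time_english("Meet me at 7:45 pm."))
# -> "Meet me at seven forty-five pm." (given the assumed completion above)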
Example no. 59
0
    def trace_to_file(self, remove_permpows=True):
        def render_holdsat(holds, time, maximum):
            def prefix(x):
                holdsat_p = r"\item"
                holdsat_p += r"\textbf{" if time == 0 or (
                    x not in holds[time - 1]) else r"{"
                holdsat_p += r"\sout{" if time < maximum and (
                    x not in holds[time + 1]) else r"{"
                return holdsat_p

            suffix = r"}}"
            if not holds[time]:  # 20170301 JAP: not sure if test ever succeeds
                return r"\item"  # to avoid LaTeX "missing item" error
            returnVal = ""
            for x in holds[time]:
                returnVal += (prefix(x) + (str(x.arguments[0]) + ": " + str(
                    x.arguments[1]) + suffix).replace('_', r'\_').replace(
                        ',', ', ').replace('(', r'(\allowbreak{}') + "\n")
            return returnVal

        def render_observed(observed_p, time):
            stem = ""
            obs = observed_p[time]
            if len(obs) != 1:
                return ""  # return an empty string, not None, for the concatenation below
            obs = obs[0]
            if obs.name == "observed":
                stem = str(obs.arguments[0])
            else:
                print("% Unrecognised observation", observed_p[time])
            return stem.rstrip().replace('_', r'\_').replace(',', ', ').replace(
                '(', r'(\allowbreak{}')

        def render_occurred(occurred_p, time):
            returnVal = ""
            for x in occurred_p[time]:
                returnVal += (str(x.arguments[0]) + ": " +
                              str(x.arguments[1]) + r"\\" "\n").replace(
                                  '_', r'\_').replace(',', ', ').replace(
                                      '(', r'(\allowbreak{}')
            return returnVal

        latex_trace_header = r"""
        \documentclass{article}
        \usepackage{todonotes}
        \usepackage{array}
        \usepackage{longtable}
        \usepackage{enumitem}
        \usepackage{tikz}
        \pagestyle{empty}
        \thispagestyle{empty}
        \usetikzlibrary{shadows}
        \usetikzlibrary{decorations}
        \usetikzlibrary{shapes}
        \usetikzlibrary{arrows}
        \usetikzlibrary{calc}
        \usetikzlibrary{fit}
        \usetikzlibrary{backgrounds}
        \usetikzlibrary{positioning}
        \usetikzlibrary{chains}
        \usetikzlibrary{scopes}
        \renewcommand*\familydefault{\sfdefault} %% Only if the base font of the document is to be sans serif
        \usepackage[normalem]{ulem}
        \newenvironment{events}
        {\begin{tabular}{>{\centering}m{\tableWidth}}}
        {\end{tabular}}
        \newenvironment{states}
        {\begin{minipage}{\tableWidth}\raggedright\begin{description}[align=left,leftmargin=1em,noitemsep,labelsep=\parindent]}
        {\end{description}\end{minipage}}
        \begin{document}
        """
        observed = {
            t - 1: self.trace.trace[t].observed
            for t in range(1, len(self.trace.trace))
        }
        occurred = defaultdict(list)
        for t in range(1, len(self.trace.trace)):
            # 20170328 JAP: added sorting
            occurred[t - 1] = sorted(self.trace.trace[t].occurred,
                                     key=lambda x: x.arguments[0].name)
        holdsat = defaultdict(list)
        for t in range(0, len(self.trace.trace)):
            # 20170328 JAP: added filtering and sorting here and removed from later
            if remove_permpows:
                holdsat[t] = sorted(
                    (f for f in self.trace.trace[t].holdsat
                     if not (f.arguments[0].name in
                             ["perm", "pow", "ipow", "gpow", "tpow"])),
                    key=lambda x: x.arguments[0].name)
            else:
                holdsat[t] = sorted(self.trace.trace[t].holdsat,
                                    key=lambda x: x.arguments[0].name)

        labels = {}
        states = {}
        tableWidth = "5cm"
        p = inflect.engine()  # to provide translation of numbers to words
        selected_states = set(range(0, len(observed) + 1))
        selected_events = set(range(0, len(observed)))
        with open(self.output_file_name, 'w') as tfile:
            print(latex_trace_header, file=tfile)
            # define transition labels as macros \Ezero ...
            print("% Event macro definitions", file=tfile)
            print(
                "% ------------------------------------------------------------------------",
                file=tfile)
            for t in selected_events:
                labels[t] = (
                    r"\newcommand{" +
                    r'\E{}'.format(p.number_to_words(t).replace('-', '')) +
                    r"}{\begin{events}"
                    "\n" + render_observed(observed, t) + r"\\"
                    "\n"
                    r"\em " + render_occurred(occurred, t) + r"\end{events}}"
                    "\n")
                print(labels[t], file=tfile)
            # define state tables as macros \Szero ...
            print("% State macro definitions", file=tfile)
            print(
                "% ------------------------------------------------------------------------",
                file=tfile)
            for t in selected_states:
                fluents = render_holdsat(holdsat, t, max(selected_states))
                states[t] = (
                    r"\newcommand{" +
                    r'\S{}'.format(p.number_to_words(t).replace('-', '')))
                if fluents == "":
                    states[t] = states[t] + r"}{$\emptyset$}" "\n"
                else:
                    states[t] = (states[t] + r"}{\begin{states}"
                                 "\n" + fluents + r"\end{states}}"
                                 "\n")
                print(states[t], file=tfile)
            # output trace as a tikzpicture in resizebox in a longtable
            print("% Institutional trace", file=tfile)
            print(
                "% ------------------------------------------------------------------------",
                file=tfile)
            print(r"\newlength{\tableWidth}"
                  "\n" + "\\setlength{{\\tableWidth}}{{{tw}}}\n\n".format(
                      tw=tableWidth) + r"\begin{longtable}{@{}l@{}}"
                  "\n"
                  r"\resizebox{\textwidth}{!}{"
                  "\n"
                  r"\begin{tikzpicture}"
                  "\n"
                  "[\nstart chain=trace going right,",
                  file=tfile)
            for t in selected_states:
                print("start chain=state{} going down,".format(t), file=tfile)
            print(
                "node distance=1cm and 5.2cm"
                "\n]"
                "\n{{ [continue chain=trace]",
                file=tfile)
            for t in selected_states:
                print(r"\node[circle,draw,on chain=trace]" +
                      "(i{i}) {{$S_{{{i}}}$}};".format(i=t),
                      file=tfile)
            for t in selected_states:
                print(
                    "{{ [continue chain=state{i} going below]\n"
                    "\\node [on chain=state{i},below=of i{i},"
                    "rectangle,draw,inner frame sep=0pt] (s{i}) {{".format(i=t)
                    +
                    r'\S{i}'.format(i=p.number_to_words(t).replace('-', '')) +
                    "};} % end node and chain\n" +
                    r"\draw (i{}) -- (s{});".format(t, t),
                    file=tfile)
            print(r"}}", file=tfile)
            # output lines between states labelled with events
            # observed/occurred
            for t in selected_events:
                print(
                    r"\draw[-latex,thin](i{x}) -- node[above]{{\E{y}}}(i{z});".
                    format(x=t,
                           y=p.number_to_words(t).replace('-', ''),
                           z=t + 1),
                    file=tfile)
            # end tikzpicture/resizebox/table
            print(
                r'\end{tikzpicture}}'
                "\n"
                r"\end{longtable}"
                "\n"
                r"\end{document}",
                file=tfile)
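One detail worth keeping in mind about the macro generation above: LaTeX control-sequence names may only contain letters, so numeric state and event indices are spelled out with inflect and stripped of hyphens before being embedded in a \newcommand name. A minimal illustration:

import inflect

p = inflect.engine()


def macro_suffix(t):
    # 'twenty-one' -> 'twentyone': hyphens are illegal in LaTeX macro names
    return p.number_to_words(t).replace('-', '')


print(macro_suffix(0))   # zero      -> \Szero / \Ezero
print(macro_suffix(21))  # twentyone -> \Stwentyone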
Example no. 60
0
from datetime import datetime, timezone
import uuid

import inflect

from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.ext.declarative import declared_attr

get_plural = inflect.engine()


class BaseMixin:
    '''
    Provides id, created_at and last_modified columns
    '''
    @declared_attr
    def __tablename__(cls):
        # derive the table name by pluralizing the lower-cased class name,
        # e.g. User -> users
        return get_plural.plural_noun(cls.__name__.lower())

    id = Column(Integer, primary_key=True)
    uuid = Column(UUID(as_uuid=True), unique=True, default=uuid.uuid4)
    # pass callables so timestamps are computed per row rather than once at import time
    created_at = Column(DateTime, default=lambda: datetime.now(timezone.utc))
    last_modified = Column(DateTime,
                           default=lambda: datetime.now(timezone.utc),
                           onupdate=lambda: datetime.now(timezone.utc))
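A quick sketch of the mixin in use with a declarative base; Base and User are illustrative names, not part of the source:

from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class User(BaseMixin, Base):
    # no explicit __tablename__: the declared_attr above derives it
    name = Column(String)


print(User.__tablename__)  # users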