def __init__(self, source, lcount):
    assert lcount
    self.index = len(DOCTYPES)
    self.doctype = None
    for doctype in source.doctypes:
        doctype = doctype.id
        if doctype and DOCTYPES.index(doctype) < self.index:
            self.index = DOCTYPES.index(doctype)
            self.doctype = doctype
    # The number of pages is divided by the number of doctypes times the
    # number of described languages.
    self.pages = int(ceil(
        float(source.pages_int or 0)
        / ((len(source.doctypes) or 1) * lcount)))
    if self.doctype == 'grammar' and self.pages >= 300:
        # A grammar of 300+ pages per language is promoted to long grammar,
        # which ranks above every entry in DOCTYPES.
        self.doctype = 'long_grammar'
        self.index = -1
    self.year = source.year_int
    self.id = source.id
    self.name = source.name
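# Usage sketch (hedged): `source` is assumed to be a bibliography record with
# `doctypes`, `pages_int`, `year_int`, `id` and `name` attributes, and the
# enclosing class is called SourceInfo here purely for illustration; both
# names are hypothetical stand-ins for whatever this module actually defines.
#
#     >>> src = get_source()  # hypothetical: a 600-page grammar of 2 languages
#     >>> info = SourceInfo(src, lcount=2)
#     >>> info.pages    # ceil(600 / (1 doctype * 2 languages))
#     300
#     >>> info.doctype  # a grammar of 300+ pages is promoted
#     'long_grammar'
#     >>> info.index    # ranks above every entry in DOCTYPES
#     -1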
SIMPLIFIED_DOCTYPES = [
    SimplifiedDoctype(i, *args) for i, args in enumerate([
        # long grammar: extensive description of most elements of the grammar,
        # ~300+ pages
        ('long grammar', 'c', '00ff00'),
        # grammar: a description of most elements of the grammar, ~150 pages
        ('grammar', 's', 'a0fb75'),
        # grammar sketch: a less extensive description of many elements of the
        # grammar, ~50 pages
        ('grammar sketch', 'd', 'ff6600'),
        ('phonology/text', 't', 'ff4400'),
        ('wordlist or less', 'f', 'ff0000'),
    ])
]

SIMPLIFIED_DOCTYPE_MAP = defaultdict(lambda: SIMPLIFIED_DOCTYPES[4])
SIMPLIFIED_DOCTYPE_MAP[-1] = SIMPLIFIED_DOCTYPES[0]
SIMPLIFIED_DOCTYPE_MAP['long_grammar'] = SIMPLIFIED_DOCTYPES[0]
for i, dt in enumerate(DOCTYPES):
    if i <= 1:  # i.e. grammar or grammarsketch
        SIMPLIFIED_DOCTYPE_MAP[i] = SIMPLIFIED_DOCTYPES[i + 1]
        SIMPLIFIED_DOCTYPE_MAP[dt] = SIMPLIFIED_DOCTYPES[i + 1]
    elif 1 < i < DOCTYPES.index('wordlist'):
        SIMPLIFIED_DOCTYPE_MAP[i] = SIMPLIFIED_DOCTYPES[3]
        SIMPLIFIED_DOCTYPE_MAP[dt] = SIMPLIFIED_DOCTYPES[3]

Endangerment = namedtuple('Endangerment', 'ord name color shape')
ENDANGERMENTS = [
    Endangerment(i, *args) for i, args in enumerate([
        ('safe', '00ff00', 'c'),
        ('vulnerable', 'a0fb75', 'c'),
        ('definitely endangered', 'ff6600', 's'),
        ('severely endangered', 'ff4400', 'd'),
        ('critically endangered', 'ff0000', 't'),
        ('extinct', '000000', 'f'),
    ])
]
# NOTE: the fallback argument below is an assumed reconstruction (the source
# is truncated after ``defaultdict(``), mirroring SIMPLIFIED_DOCTYPE_MAP.
ENDANGERMENT_MAP = defaultdict(lambda: ENDANGERMENTS[0])
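# Lookup sketch (hedged): SIMPLIFIED_DOCTYPE_MAP accepts either a doctype
# index or a doctype id and collapses the DOCTYPES scale to five categories;
# unmapped keys fall back to 'wordlist or less'.
#
#     >>> SIMPLIFIED_DOCTYPE_MAP['long_grammar'].name
#     'long grammar'
#     >>> SIMPLIFIED_DOCTYPE_MAP[0].name  # DOCTYPES[0], i.e. 'grammar'
#     'grammar'
#     >>> SIMPLIFIED_DOCTYPE_MAP['some rare doctype'].name
#     'wordlist or less'
#     >>> ENDANGERMENT_MAP['unknown status'].name  # assumed default, see above
#     'safe'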