Example #1
File: cwc.py Project: lngvietthang/omwtk
def gen_stats(corpus_file, report_path=None):
    ''' Generate statistics for a text corpus (word count, most frequent words, etc.)
    '''
    report = TextReport(report_path)
    report.header("Stat for %s" % corpus_file)
    line_count = -1
    word_count = 0
    c = Counter()
    with open(corpus_file, 'r', encoding='utf8') as infile:
        lines = infile.readlines()
        line_count = len(lines)
        for line in lines:
            tokens = line.split()
            for token in tokens:
                # tokens may carry a POS tag in word/POS format;
                # the POS part is parsed but not used below
                parts = token.split("/")
                if len(parts) == 2:
                    word = parts[0]
                    POS = parts[1]
                else:
                    word = parts[0]
                    POS = None
                for spechar in SPECIAL_CHARS:
                    word = word.replace(spechar, '')
                word = word.lower().replace("_", " ")  # original word form
                if word == '':
                    # the cleanup above reduced this token to nothing
                    print(token)
                c.count(word)
                word_count += 1
    report.writeline("Line count: %s" % line_count)
    report.writeline("Word count: %s" % word_count)
    report.writeline("Word class: %s" % len(c.sorted_by_count()))
    report.writeline("Top %d    :" % TOP_K)
    for item in c.sorted_by_count()[:TOP_K]:
        report.writeline("%s: %s" % (item[0], item[1]), level=1)
    report.writeline("Bottom %d :" % TOP_K)
    for item in c.sorted_by_count()[-TOP_K:]:
        report.writeline("%s: %s" % (item[0], item[1]), level=1)
    report.writeline("-" * 80)
    for item in c.group_by_count():
        report.writeline("%s: %s" % (item[0], ', '.join(DaoPhay.vn_sorted(item[1]))))
Example #2
File: helpers.py Project: letuananh/yawlib
def dump_synset(ss, compact_gloss=False, compact_tags=False, more_compact=True, report_file=None):
    ''' Print synset details for debugging purposes

    Arguments:
        ss            -- Synset object to dump
        compact_gloss -- Don't dump gloss tokens' details
        compact_tags  -- Don't dump tagged senses' details
        more_compact  -- Don't dump full details of synset
        report_file   -- Report file to write to

    '''
    if report_file is None:
        report_file = TextReport()  # Default to stdout
    
    if more_compact:
        report_file.header("Synset: %s (terms=%s | keys=%s)" % (ss.get_synsetid(), ss.terms, ss.keys), 'h0')
    else:
        report_file.header("Synset: %s" % ss, 'h0')

    for rgloss in ss.raw_glosses:
        if more_compact:
            if rgloss.cat != 'orig':
                continue
        report_file.print(rgloss)

    gloss_count = itertools.count(1)
    for gloss in ss.glosses:
        report_file.print('')  # blank line between glosses, written to the report instead of stdout
        report_file.header("Gloss #%s: %s" % (next(gloss_count), gloss), 'h2')

        # Dump gloss items
        if compact_gloss:
            report_file.print("Tokens => %s" % gloss.get_gramwords(), level=2)
        else:
            for item in gloss.items:
                # print("\t%s - { %s }" % (uniquify(item.get_gramwords()), item))
                report_file.print("%s - { %s }" % (set(item.get_gramwords()), item), level=2)
            report_file.print(("-" * 10), level=1)
        
        # Dump tags
        if compact_tags:
            report_file.print("Tags => %s" % gloss.get_tagged_sensekey(), level=2)
        else:
            for tag in gloss.tags:
                report_file.print("%s" % tag, level=1)
    report_file.print('')
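A usage sketch, assuming ss is a synset object that has already been fetched from a yawlib wordnet backend (that retrieval step depends on the backend, so it is omitted here):

# Hypothetical usage; 'synset_dump.txt' is a placeholder path and
# 'ss' is assumed to have been fetched elsewhere
report = TextReport('synset_dump.txt')   # TextReport() would dump to stdout instead
dump_synset(ss, compact_gloss=True, more_compact=False, report_file=report)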
Example #3
def dump_synset(ss,
                compact_gloss=False,
                compact_tags=False,
                more_compact=True,
                report_file=None,
                compact=True):
    ''' Print synset details for debugging purposes

    Arguments:
        ss            -- Synset object to dump
        compact_gloss -- Don't dump gloss tokens' details
        compact_tags  -- Don't dump tagged senses' details
        more_compact  -- Don't dump full details of synset
        report_file   -- Report file to write to
        compact       -- Print each gloss as a single "(cat) text" line

    '''
    if report_file is None:
        report_file = TextReport()  # Default to stdout

    if more_compact:
        report_file.header(
            "Synset: %s (lemmas=%s | keys=%s)" %
            (ss.sid.to_canonical(), ss.lemmas, ss.keys), 'h0')
    else:
        report_file.header("Synset: %s" % ss, 'h0')

    for rgloss in ss.raw_glosses:
        if more_compact:
            if rgloss.cat != 'orig':
                continue
        report_file.print(rgloss)

    gloss_count = itertools.count(1)
    for gloss in ss.glosses:
        if compact:
            report_file.print("({cat}) {txt}".format(cat=gloss.cat,
                                                     txt=gloss.text()))
        else:
            report_file.print('')
            report_file.header("Gloss #%s: %s" % (next(gloss_count), gloss),
                               'h2')

            # Dump gloss items
            if compact_gloss:
                report_file.print("Tokens => %s" % gloss.get_gramwords(),
                                  level=2)
            else:
                for item in gloss.items:
                    # print("\t%s - { %s }" % (uniquify(item.get_gramwords()), item))
                    report_file.print("%s - { %s }" %
                                      (set(item.get_gramwords()), item),
                                      level=2)
                report_file.print(("-" * 10), level=1)
            # Dump tags
            if compact_tags:
                report_file.print("Tags => %s" % gloss.get_tagged_sensekey(),
                                  level=2)
            else:
                for tag in gloss.tags:
                    report_file.print("%s" % tag, level=1)
    report_file.print('')
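Compared with Example #2, this variant adds a compact flag: with the default compact=True each gloss prints as a single "(cat) text" line, and the compact_gloss/compact_tags switches only take effect once compact=False restores the full per-gloss dump. A hypothetical pair of calls showing both modes:

dump_synset(ss)                 # one "(cat) text" line per gloss
dump_synset(ss, compact=False)  # full dump, as in Example #2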
Example #4
def omw_vs_gwn_def():
    rp = TextReport("data/omw_gwn_report.txt")
    rpdiff = TextReport("data/omw_gwn_diff.txt")
    rptypo = TextReport("data/omw_gwn_typo.txt")
    rpsn = TextReport("data/omw_gwn_sciname.txt")
    c = Counter(TAGS.ORDER)

    # ssids to compare
    diff_ssids = []
    ssids = read_diff_ssids()
    if not ssids:
        print("Generating synset ID list")
        omw_ssids = set(get_omw_synsets())
        gwn_ssids = set(get_gwn_synsets())
        # only care about old GWN synsets
        ssids = omw_ssids.intersection(gwn_ssids)
    else:
        # ssid_filepath is presumably a module-level path to the cached ssid list
        print("Comparing {} synsets loaded from {}".format(len(ssids), ssid_filepath))
    lang = 'eng'
    with omw.ctx() as omw_ctx, gwn.ctx() as gwn_ctx:
        print("Comparing {} synsets".format(len(ssids)))
        for ss in list(ssids):
            ss = str(ss)
            c.count("total")
            tags, odef, gdef = compare_synset(omw, gwn, ss, omw_ctx, gwn_ctx)
            omwss = omw.get_synset(ss, ctx=omw_ctx)
            tags_str = ' '.join('[{}]'.format(t.upper()) for t in tags)
            if TAGS.DIFF in tags:
                diff_ssids.append(ss)
                # [FCB] why did we change?
                gwnss = gwn.get_synset(ss, ctx=gwn_ctx)
                glosses = gwn_ctx.gloss.select('surface = ?', (gwnss.definition,))
                if glosses and len(glosses) > 1:
                    tags.add(TAGS.DUP)
                    # IDs of all synsets that share this definition
                    dup_ssids = [str(SynsetID.from_string(g.sid)) for g in glosses]
                    reason = "Not unique (shared among {}) so the OMW team changed it".format(' '.join(dup_ssids))
                else:
                    tags.add(TAGS.OMW)
                    defs = omw.sdef.select('synset=? and lang=?', (ss, lang))
                    usrs = {d.usr for d in defs if d.usr}
                    usrs_str = ', '.join(usrs) if usrs else "someone in NTU"
                    reason = "{} made this change.".format(usrs_str)
                tags_str = ' '.join('[{}]'.format(t.upper()) for t in tags)
                rpdiff.header("{} {}".format(tags_str, ss))
                rpdiff.print("OMW: {}".format(omwss.definition))
                rpdiff.print("GWN: {}".format(gdef))
                rpdiff.print("Reason: {}".format(reason))
            if TAGS.SCINAME in tags:
                rpsn.header("{} {}".format(tags_str, ss))
                rpsn.print("OMW: {}".format(omwss.definition))
                rpsn.print("GWN: {}".format(gdef))
            if TAGS.REP in tags or TAGS.TYPO in tags:
                rptypo.header("{} {}".format(tags_str, ss))
                rptypo.print("OMW: {}".format(omwss.definition))
                rptypo.print("GWN: {}".format(gdef))
            # master report
            for tag in tags:
                c.count(tag)
            if not tags:
                c.count(TAGS.IDENT)
            rp.header("{} {}".format(tags_str, ss))
            rp.print("OMW: {}".format(omwss.definition))
            rp.print("GWN: {}".format(gdef))

    # done
    c.summarise(report=rp)
    with open('data/omw_gwn_diff_ssids.txt', 'wt') as diff_ssid_file:
        for ss in diff_ssids:
            diff_ssid_file.write('{}\n'.format(ss))
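The read_diff_ssids() helper called near the top is not shown in this example; below is a minimal sketch of a plausible counterpart to the writer above, assuming it just reads back the one-synset-ID-per-line cache file (name and behaviour inferred from this function, not confirmed by the source):

import os

def read_diff_ssids(path='data/omw_gwn_diff_ssids.txt'):
    # Hypothetical reader for the ssid cache written above;
    # returns an empty list when the cache does not exist yet
    if not os.path.isfile(path):
        return []
    with open(path, 'r') as infile:
        return [line.strip() for line in infile if line.strip()]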