def loadPMIDictionary():
        """Load lexica/PMI_Dictionary.txt and register each lexical pointer's
        complements on verbRowConverter.pmi.

        Returns a debugging dictionary mapping the lexical pointer (the raw
        string from column 1) to the list of its complements (LHS).
        """
        pmidic = {}
        pmi_f = openCSV("lexica/PMI_Dictionary.txt", '\t')

        iterRows = iter(pmi_f)
        # skip the header row; default of None tolerates an empty file
        # instead of raising StopIteration
        next(iterRows, None)

        # get all of the complements for each lexical pointer
        for row in iterRows:
            complement = row[7]
            # these literal markers are kept verbatim; anything else is
            # assumed to be UTF-8 Hebrew and is transliterated
            if complement not in ["None", "clause", "inf"]:
                complement = utf8ToTransliteration(complement)

            # NOTE(review): verbRowConverter.pmi is keyed by int(row[1]) while
            # the debug dict below is keyed by the raw string — intentional?
            verbRowConverter.pmi[int(row[1])].append(complement)

            lp = row[1]
            # append to this pointer's complement list, creating it on first use
            pmidic.setdefault(lp, []).append(complement)
        return pmidic
    # Load the adverb lexicon, then existing verbs, then the PMI complement
    # dictionary into the row-converter modules.  Order matters: the verb
    # converter must hold its verbs before the PMI complements are attached.
    advRowConverter.loadAllAdvs(partsOfSpeechList[posList_indices_c["adverb"]])
    print "loading existing verbs into verb converter..."
    verbRowConverter.loadAllVerbs(partsOfSpeechList[posList_indices_c["verb"]], lexicon_c)
    print "loading PMI_Dictionary into verb converter..."
    verbRowConverter.loadPMIDictionary()#original

    # NOTE(review): dead debug code below — candidate for deletion.
##    pmidic = verbRowConverter.loadPMIDictionary()#lhs debug
##    keys = pmidic.keys()
##    keys.sort()
##    for key in keys:
##        val = pmidic[key]
##        #if len(val) == 4 and 'at' in val and 'b' in val and 'yl' in val and 'clause' in val:
##        if 'b' in val and 'at' in val and 'yl' in val and ('l' in val or 'al' in val):
##            print(str(key) + ': ' + str(val))

    # Open the tab-separated inflection file for the main conversion pass.
    csv_f = openCSV("lexica/new_dinflections.csv", '\t')
    # skip the header row
    # NOTE(review): raises StopIteration if the file is empty — acceptable
    # here only if the file is guaranteed to have a header.
    iterRows = iter(csv_f)
    next(iterRows)

    print "parsing new_dinflections.csv..."

    # Convert each data row to TDL via whichever rowConverter the factory
    # selects for it, and accumulate the result into the output dictionary.
    for row in iterRows:
        # get the rowConverter class to convert the row to the proper TDL
        rowToTDL = factory.converterFactory(row)

        # skip rows that have no ways to be converted to TDL
        if rowToTDL is None:
            continue

        rowToTDL.addToDictionary()