Example 1
def extractTimeArg(parsedLine):
    timeArg = ""

    print "parsedLine = " + str(parsedLine)

    # case list, which will occur for facts
    if type(parsedLine) == list:
        # the time arg immediately follows an '@' sign
        for i in range(0, len(parsedLine)):
            if parsedLine[i] == "@":
                try:
                    timeArg = parsedLine[i + 1]
                except IndexError:
                    sys.exit("ERROR: Missing time argument after '@' in " +
                             str(parsedLine))

    # case string, which will occur for rules
    elif type(parsedLine) == str:
        aString = parsedLine
        for i in range(0, len(aString)):
            aChar = aString[i]
            if aChar == "@":
                try:
                    timeArg = parsedLine[i + 1]
                except IndexError:
                    sys.exit("ERROR: Missing time argument after '@' in " +
                             str(parsedLine))

    else:
        tools.bp(__name__,
                 inspect.stack()[0][3],
                 "parsedLine in not a list or a string: " + str(parsedLine))

    return timeArg
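
A minimal usage sketch (the inputs below are hypothetical, not actual output of the project's Dedalus parser):

# fact case: parsedLine is a token list; the token after "@" is returned
fact_tokens = ["node", "(", "'a'", ",", "1", ")", "@", "1"]
print(extractTimeArg(fact_tokens))   # -> "1"

# rule case: parsedLine is a string; note that only the single character
# following "@" is captured
rule_string = "node(X,Y)@next"
print(extractTimeArg(rule_string))   # -> "n"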
Example 2
def test_bp_tools(self):
    with self.assertRaises(SystemExit) as cm:
        tools.bp("testfile", "testfunc", "testmsg")
    self.assertTrue(
        cm.exception.code in
        "BREAKPOINT in file testfile at function testfunc :\n>>> testmsg FAIL"
    )
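
Inferred from the assertion above (a sketch only, not the project's actual implementation): tools.bp aborts with a SystemExit carrying a breakpoint message, roughly:

import sys

def bp(filename, funcname, msg):
    # hypothetical reconstruction of tools.bp, based on the message the test expects
    sys.exit("BREAKPOINT in file " + filename + " at function " + funcname +
             " :\n>>> " + msg)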
Example 3
def addSubgoalsToRules(domainRuleName, newDMRIDList, cursor):

    # -------------------------------------------- #
    # get goal att list for the dom_not_whatever
    # rule previously written in addDomainRules
    # -------------------------------------------- #

    # get the rid (rule id) of the domain rule
    cursor.execute("SELECT rid FROM Rule WHERE goalName=='" + domainRuleName +
                   "'")
    rid = cursor.fetchone()
    if not rid or rid == "":
        tools.bp(
            __name__,
            inspect.stack()[0][3],
            "FATAL ERROR : writing domain subgoals, but no '" +
            domainRuleName + "' rule exists. aborting...")
    else:
        rid = tools.toAscii_str(rid)

    # get the goal attribute list for the domain rule
    cursor.execute("SELECT attID,attName,attType FROM GoalAtt WHERE rid=='" +
                   rid + "'")
    goalAttList = cursor.fetchall()
    goalAttList = tools.toAscii_multiList(goalAttList)

    for rid in newDMRIDList:

        # ----------------------------------- #
        # generate sid                        #
        # ----------------------------------- #
        sid = tools.getID()

        # ----------------------------------- #
        # fixed subgoal time arg to nothing   #
        # ----------------------------------- #
        subgoalTimeArg = ""

        # ----------------------------------- #
        # insert subgoal metadata             #
        # ----------------------------------- #
        cursor.execute("INSERT INTO Subgoals VALUES ('" + rid + "','" + sid +
                       "','" + domainRuleName + "','" + subgoalTimeArg + "')")

        # ----------------------------------- #
        # insert subgoal att data             #
        # ----------------------------------- #

        attID = 0
        for att in goalAttList:

            attName = att[1]
            attType = att[2]

            cursor.execute("INSERT INTO SubgoalAtt VALUES ('" + rid + "','" +
                           sid + "','" + str(attID) + "','" + attName + "','" +
                           attType + "')")

            attID += 1
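
The function assumes an IR database shaped roughly as follows; this is a minimal sqlite3 sketch inferred from the SQL strings above, and the real project schema may carry additional columns:

import sqlite3

conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
cursor.execute("CREATE TABLE Rule       (rid text, goalName text, goalTimeArg text, rewritten text)")
cursor.execute("CREATE TABLE GoalAtt    (rid text, attID int, attName text, attType text)")
cursor.execute("CREATE TABLE Subgoals   (rid text, sid text, subgoalName text, subgoalTimeArg text)")
cursor.execute("CREATE TABLE SubgoalAtt (rid text, sid text, attID int, attName text, attType text)")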
Example 4
def extractAttList(parsedLine):
    attList = []

    if type(parsedLine) is list:  # occurs when parsed line is a fact
        saveFlag = False
        skipChars = [",", "'", '"', " ", ";"]

        for item in parsedLine:  # save everything except the skip chars
            if item == '(':
                saveFlag = True
                continue
            elif item == ')':
                saveFlag = False
                continue
            elif item in skipChars:
                continue
            elif saveFlag:
                attList.append(item)

    elif type(parsedLine) is str:  # occurs when parsedLine is a rule
        aString = parsedLine

        # .................................. #
        #if "@" in aString :
        #  print "aString = " + str(aString)
        #  tools.bp( __name__, inspect.stack()[0][3], "attList = " + str(attList) )
        # .................................. #

        for i in range(0, len(aString)):
            aChar = aString[i]
            if aChar == "(":

                for j in range(i, len(aString)):
                    if aString[j] == ")":

                        attList = aString[i + 1:j]
                        attList = attList.split(",")

                        # .................................. #
                        #if "@" in aString :
                        #  print "aString = " + str(aString)
                        #  tools.bp( __name__, inspect.stack()[0][3], "attList = " + str(attList) )
                        # .................................. #

                        return attList

    else:
        tools.bp(
            __name__,
            inspect.stack()[0][3],
            "FATAL ERROR : parsedLine is not a list or a string:\n" +
            str(parsedLine) + "\ntype(parsedLine) = " + str(type(parsedLine)))

    return attList
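
A minimal usage sketch (hypothetical inputs):

# fact case: parsedLine is a token list; everything between "(" and ")" is kept,
# except the skip characters
fact_tokens = ["node", "(", "'a'", ",", "1", ")"]
print(extractAttList(fact_tokens))    # -> ["'a'", "1"]

# rule case: parsedLine is a string; the first parenthesized group is split on commas
rule_string = "node(X,Y):-link(X,Y);"
print(extractAttList(rule_string))    # -> ['X', 'Y']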
Example 5
def rewriteInductive(cursor):

    if DEDALUSREWRITER_DEBUG:
        print " ... running inductive rewrite ... "

    # grab all existing next rule ids
    inductiveRuleIDs = getInductiveRuleIDs(cursor)

    # check for bugs
    if DEDALUSREWRITER_DUMPS_DEBUG:
        print "inductiveRuleIDs = " + str(inductiveRuleIDs)
        print "<><><><><><><><><><><><><><><>"
        print "    DUMPING INDUCTIVE RULES   "
        for r in inductiveRuleIDs:
            print dumpers.reconstructRule(r, cursor)
        print "<><><><><><><><><><><><><><><>"

    # add attribute 'SndTime+1' to head
    for rid in inductiveRuleIDs:

        if not tools.checkIfRewrittenAlready(rid, cursor):
            # set rule as rewritten
            cursor.execute("UPDATE Rule SET rewritten='" + rewrittenFlag +
                           "' WHERE rid=='" + rid + "'")

            # grab maximum attribute id
            cursor.execute("SELECT MAX(attID) FROM GoalAtt WHERE rid == '" +
                           rid + "'")
            rawMaxID = cursor.fetchone()
            rawMaxID = rawMaxID[0]  # extract from tuple

            # .................................. #
            #cursor.execute( "SELECT goalName FROM Rule WHERE rid=='" + rid + "'" )
            #goalName = cursor.fetchone()
            #goalName = tools.toAscii_str( goalName )
            #print "___ goalName = " + str(goalName)
            #if goalName == "clients" :
            #  print "rawMaxID = " + str( rawMaxID )
            #  sys.exit()
            # .................................. #

            if rawMaxID or (rawMaxID == 0):
                newAttID = int(rawMaxID + 1)  # the att id for SndTime

                # check if add arg is a specific time
                cursor.execute("SELECT goalTimeArg FROM Rule WHERE rid == '" +
                               rid + "'")
                timeArg = cursor.fetchone()
                timeArg = tools.toAscii_str(timeArg)

                # add attribute 'SndTime+1' to head
                cursor.execute("INSERT INTO GoalAtt VALUES ('" + rid + "','" +
                               str(newAttID) + "','" + timeAtt_snd + "+1" +
                               "','int')")
            else:
                tools.bp(
                    __name__,
                    inspect.stack()[0][3],
                    "FATAL ERROR : current rule goal has no attributes:\n" +
                    dumpers.reconstructRule(rid, cursor))

            if DEDALUSREWRITER_DEBUG:
                # rawMaxID was already unpacked from its result tuple above
                print "inductive: rawMaxID = " + str(rawMaxID)

    #   add attribute 'SndTime' to all subgoals
    for rid in inductiveRuleIDs:
        sids = getSubgoalIDs(cursor, rid)  # get all subgoal ids
        sids = tools.toAscii_list(sids)

        firstSubgoalAtts = []
        for s in sids:
            cursor.execute(
                '''SELECT MAX(attID) FROM SubgoalAtt WHERE SubgoalAtt.sid == "'''
                + s + '''"''')
            rawMaxID = cursor.fetchone()
            newAttID = int(rawMaxID[0] + 1)
            cursor.execute("INSERT INTO SubgoalAtt VALUES ('" + rid + "','" +
                           s + "'," + str(newAttID) + ",'" + timeAtt_snd +
                           "','int')")

            # while we're here, collect the first attribute of this subgoal
            cursor.execute(
                "SELECT argName FROM SubgoalAddArgs WHERE SubgoalAddArgs.rid == '"
                + rid + "' AND SubgoalAddArgs.sid == '" + s + "'")
            addArg = cursor.fetchone()
            if not addArg == None:
                addArg = tools.toAscii_str(addArg)

            cursor.execute(
                "SELECT attName FROM SubgoalAtt WHERE SubgoalAtt.sid == '" +
                s + "' AND SubgoalAtt.attID == '" + str(0) + "'")
            firstAtt = cursor.fetchone()
            if (not firstAtt == None) and (not addArg == "notin"):
                firstAtt = tools.toAscii_str(firstAtt)
                firstSubgoalAtts.append(firstAtt)
            else:
                if DEDALUSREWRITER_DEBUG:
                    print "firstAtt = " + str(firstAtt)

        # add clock subgoal
        clockTools.addClockSubgoal_inductive(rid, firstSubgoalAtts,
                                             timeAtt_snd, timeAtt_deliv,
                                             cursor)

    # remove time arg from rule goals
    for rid in inductiveRuleIDs:
        cursor.execute("UPDATE Rule SET goalTimeArg='' WHERE rid='" + rid +
                       "'")

    # check for bugs
    if DEDALUSREWRITER_DUMPS_DEBUG:
        print "Dump all rules from inductive : "
        dumpers.ruleDump(cursor)

    if DEDALUSREWRITER_DEBUG:
        print "... done rewriteInductive ..."

    return None
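
Informally, the inductive rewrite turns a Dedalus "@next" rule into a plain Datalog rule over explicit time attributes, roughly as follows (illustrative only; the exact clock subgoal arguments are produced by clockTools.addClockSubgoal_inductive):

# before:  p(X)@next :- q(X) ;
# after:   p(X, SndTime+1) :- q(X, SndTime), clock(..., SndTime, ...) ;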
Example 6
def c4datalog(argDict, cursor):

    logging.debug("  C4 DATALOG : running process...")

    goalName = None
    provGoalNameOrig = None

    tableListStr = ""  # collect all table names delmited by a single comma only.
    tableListArray = []

    # ----------------------------------------------------------- #
    # create goal defines

    # get all rids
    cursor.execute("SELECT rid FROM Rule")
    ridList = cursor.fetchall()
    ridList = tools.toAscii_list(ridList)

    definesNames = []
    definesList = []
    # ////////////////////////////////////////////////////////// #
    # populate defines list for rules
    for rid in ridList:
        newDefine = ""

        # get goal name
        cursor.execute("SELECT goalName FROM Rule WHERE rid = '" + rid + "'")
        goalName = cursor.fetchone()
        goalName = tools.toAscii_str(goalName)

        # if it's a prov rule, get the original goal name
        provGoalNameOrig = None
        if "_prov" in goalName:
            provGoalNameOrig = goalName.split("_prov")
            provGoalNameOrig = provGoalNameOrig[0]

        # populate table information collection structures
        tableListStr += goalName + ","
        tableListArray.append(goalName)

        # ////////////////////////////////////////////////////////// #
        # populate defines list for rule goals
        logging.debug("In c4datalog: definesList = " + str(definesList))

        if not existingDefine(goalName, definesNames):  # prevent duplicates

            # get goal attribute list
            cursor.execute("SELECT attID,attType From GoalAtt WHERE rid = '" +
                           rid + "'")
            goalAttList = cursor.fetchall()
            goalAttList = tools.toAscii_multiList(goalAttList)

            logging.debug("* goalName = " + goalName + ", goalAttList " +
                          str(goalAttList))

            # populate type list for rule
            typeList = []
            for k in range(0, len(goalAttList)):
                att = goalAttList[k]
                attID = att[0]
                attType = att[1]

                typeList.append(attType)

            # populate new c4 define statement
            newDefine = ""
            newDefine += "define("
            newDefine += goalName
            newDefine += ",{"

            for i in range(0, len(typeList)):
                newDefine += typeList[i]
                if i < len(typeList) - 1:
                    newDefine += ","
                else:
                    newDefine += "});" + "\n"

            # save new c4 define statement
            if not newDefine in definesList:
                definesNames.append(goalName)
                definesList.append(newDefine)
        # ////////////////////////////////////////////////////////// #

    # ----------------------------------------------------------- #
    # create fact defines

    # get all fact ids
    cursor.execute("SELECT fid FROM Fact")
    fidList = cursor.fetchall()
    fidList = tools.toAscii_list(fidList)

    for fid in fidList:

        # get goal name
        cursor.execute("SELECT name FROM Fact WHERE fid = '" + fid + "'")
        factName = cursor.fetchone()
        factName = tools.toAscii_str(factName)

        logging.debug("**> factName = " + factName)

        logging.debug("In c4datalog: definesList = " + str(definesList))

        if not existingDefine(factName, definesNames):  # prevent duplicates

            # populate table string
            tableListStr += factName + ","
            tableListArray.append(factName)

            # get goal attribute list
            cursor.execute(
                "SELECT dataID,dataType From FactData WHERE fid = '" + fid +
                "'")
            factAttList = cursor.fetchall()
            factAttList = tools.toAscii_multiList(factAttList)

            logging.debug("* factName = " + factName + ", factAttList " +
                          str(factAttList))

            # populate type list for fact
            typeList = []
            for k in range(0, len(factAttList)):
                att = factAttList[k]
                attID = att[0]
                attType = att[1]

                typeList.append(attType)

            # check for time argument
            #cursor.execute( "SELECT timeArg FROM Fact WHERE fid='" + fid + "'" )
            #timeArg = cursor.fetchone()
            #timeArg = tools.toAscii_str( timeArg )

            #if timeArg :
            #  typeList.append( "int" )

            # populate new c4 define statement
            newDefine = ""
            newDefine += "define("
            newDefine += factName
            newDefine += ",{"

            for i in range(0, len(typeList)):
                newDefine += typeList[i]
                if i < len(typeList) - 1:
                    newDefine += ","
                else:
                    newDefine += "});" + "\n"

            # save new c4 define statement
            if not newDefine in definesList:
                definesNames.append(factName)
                definesList.append(newDefine)
    # ////////////////////////////////////////////////////////// #

    # ----------------------------------------------------------- #
    # add clock define

    definesList.append("define(clock,{string,string,int,int});\n")
    tableListStr += "clock,"
    tableListArray.append("clock")

    # ----------------------------------------------------------- #
    # add not_clock define

    #definesList.append( "define(not_clock,{string,string,int,int});\n" )
    #tableListStr += "not_clock,"
    #tableListArray.append( "not_clock" )

    # ----------------------------------------------------------- #
    # add crash define

    definesList.append("define(crash,{string,string,int,int});\n")
    tableListStr += "crash,"
    tableListArray.append("crash")

    # ----------------------------------------------------------- #
    # add facts

    cursor.execute("SELECT fid FROM Fact")
    fidList = cursor.fetchall()
    fidList = tools.toAscii_list(fidList)

    factList = []
    for fid in fidList:
        newFact = dumpers_c4.dumpSingleFact_c4(fid, cursor)
        factList.append(newFact)

    # ----------------------------------------------------------- #
    # add clock facts

    clockFactList = dumpers_c4.dump_clock(cursor)

    logging.debug("c4_translator: clockFactList = " + str(clockFactList))

    # ----------------------------------------------------------- #
    # add crash facts

    crashFactList = dumpers_c4.dump_crash(cursor)
    #crashFactList = []

    #print crashFactList
    #tools.bp( __name__, inspect.stack()[0][3], "blah" )

    #logging.debug( "c4_translator: crashFactList = " + str( crashFactList ) )

    # ----------------------------------------------------------- #
    # add rules

    cursor.execute("SELECT rid FROM Rule")
    ridList = cursor.fetchall()
    ridList = tools.toAscii_list(ridList)

    ruleList = []
    for rid in ridList:

        # verify data type compatibility for rules with equations
        #verificationResults = tools.checkDataTypes( rid, cursor ) # returns array

        #yesCompatible = verificationResults[0]
        #offensiveEqn  = verificationResults[1]
        #lhsType       = verificationResults[2]
        #rhsType       = verificationResults[3]

        #if yesCompatible :
        if True:
            newRule = dumpers_c4.dumpSingleRule_c4(rid, cursor)
            ruleList.append(newRule)

        else:  # data types are incompatible
            # throw error and abort
            tools.bp(
                __name__,
                inspect.stack()[0][3],
                "FATAL ERROR: DATA TYPE INCOMPATABILITY\nAttempting to evaluate an equation in which variables possess incomparable types.\nERROR in line: "
                + dumpers_c4.dumpSingleRule_c4(rid, cursor) +
                "\nERROR in eqn: " + offensiveEqn + "\nlhs is of type " +
                lhsType + " and rhs is of type " + rhsType)

    # ------------------------------------------------------ #
    # grab the next rule handling method

    try:
        NEXT_RULE_HANDLING = tools.getConfig( argDict[ "settings" ], \
                                              "DEFAULT", \
                                              "NEXT_RULE_HANDLING", \
                                              str )

    except ConfigParser.NoOptionError:
        logging.info(
            "WARNING : no 'NEXT_RULE_HANLDING' defined in 'DEFAULT' section of settings file."
        )
        tools.bp( __name__, inspect.stack()[0][3], \
                 "FATAL ERROR : NEXT_RULE_HANDLING parameter not specified in DEFAULT section of settings file. use 'USE_AGGS', 'SYNC_ASSUMPTION', or 'USE_NEXT_CLOCK' only." )

    # sanity check next rule handling value
    if NEXT_RULE_HANDLING == "USE_AGGS" or \
       NEXT_RULE_HANDLING == "SYNC_ASSUMPTION" or \
       NEXT_RULE_HANDLING == "USE_NEXT_CLOCK" :
        pass
    else:
        tools.bp( __name__, inspect.stack()[0][3], \
                  "FATAL ERROR : unrecognized NEXT_RULE_HANDLING value '" + NEXT_RULE_HANDLING + "'. use 'USE_AGGS', 'SYNC_ASSUMPTION', or 'USE_NEXT_CLOCK' only." )

    # ----------------------------------------------------------- #
    # add next_clock, if necessary

    if NEXT_RULE_HANDLING == "USE_NEXT_CLOCK":

        # ------------------------------------------------------ #
        # add define

        definesList.append("define(next_clock,{string,string,int,int});\n")
        tableListStr += "next_clock,"
        tableListArray.append("next_clock")

        # ------------------------------------------------------ #
        # add next_clock facts for all synchronous facts appearing in clock

        next_clock_factList = []
        for cfact in clockFactList:
            if isSynchronous(cfact):
                next_clock_fact = "next_" + cfact
                next_clock_factList.append(next_clock_fact)

    # ----------------------------------------------------------- #
    # save table list

    logging.debug("*******************************************")
    logging.debug("table list str :\n" + str(tableListStr))
    logging.debug("table list array :\n" + str(tableListArray))

    # ----------------------------------------------------------- #
    # collect program statements

    logging.debug("*******************************************")
    logging.debug("definesList :\n" + str(definesList))
    logging.debug("*******************************************")
    logging.debug("factList :\n" + str(factList))
    logging.debug("*******************************************")
    logging.debug("ruleList :\n" + str(ruleList))

    # NOTE: listOfStatementLists controls the ordering of statements
    #       in the final c4 program.
    if NEXT_RULE_HANDLING == "USE_NEXT_CLOCK":
        listOfStatementLists = [ definesList, \
                                 ruleList, \
                                 factList, \
                                 crashFactList, \
                                 next_clock_factList, \
                                 clockFactList ]
    else:
        #listOfStatementLists = [ definesList, \
        #                         factList, \
        #                         ruleList, \
        #                         clockFactList ]
        listOfStatementLists = [ definesList, \
                                 ruleList, \
                                 factList, \
                                 crashFactList, \
                                 clockFactList ]

    program = tools.combineLines(listOfStatementLists)

    # break down into list of individual statements
    allProgramLines = []
    for group in listOfStatementLists:
        for statement in group:
            allProgramLines.append(statement.rstrip())

    # remove duplicates
    tableListArray = set(tableListArray)
    tableListArray = list(tableListArray)

    logging.debug("  C4 DATALOG : ...done.")
    return [allProgramLines, tableListArray]
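
A sketch of how the return value might be consumed (assuming an argDict and a cursor over a populated IR database; names here are illustrative):

allProgramLines, tableListArray = c4datalog(argDict, cursor)
program_string = "\n".join(allProgramLines) + "\n"   # same shaping as getActualResults below
print(program_string)      # the full c4 program: defines, rules, facts, clock facts
print(tableListArray)      # deduplicated list of table names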
class Test_toy2_breaking_example(unittest.TestCase):

    # get debug level
    if log_settings.debug_level == "debug":
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            level=logging.DEBUG)

    elif log_settings.debug_level == "info":
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            level=logging.INFO)

    elif log_settings.debug_level == "warning":
        logging.basicConfig(format='%(levelname)s:%(message)s',
                            level=logging.WARNING)

    else:
        tools.bp(
            __name__,
            inspect.stack()[0][3], "FATAL ERROR : unrecognized debug_level '" +
            log_settings.debug_level + "'. use 'debug', 'info', or 'warning' only.")

    # set globals
    PRINT_STOP = False
    COMPARE_PROGS = True

    ##############################
    #  DM TOY 2 USE NEXT CLOCK  #
    ##############################
    # tests rewriting the second toy program using next_clock
    #@unittest.skip( "c4 illogically calculates a('a',2,2) and domcomp_a('a',2,2). behavior did not occur when using aggregates in next rules." )
    def test_dm_toy2_use_next_clock(self):

        # specify input and output paths
        inputfile = "./toy2.ded"
        expected_iapyx_dm_path = "./toy2_breaking_example_use_next_clock.olg"

        # get argDict
        argDict = self.getArgDict(inputfile)
        argDict["settings"] = "./settings_use_next_clock.ini"

        self.comparison_workflow(argDict, inputfile, expected_iapyx_dm_path,
                                 None)

    ###############################
    #  DM TOY 2 SYNC ASSUMPTION  #
    ###############################
    # tests rewriting the second toy program using the synchronous assumption
    #@unittest.skip( "c4 illogically calculates a('a',2,2) and domcomp_a('a',2,2). behavior did not occur when using aggregates in next rules." )
    def test_dm_toy2_sync_assumption(self):

        # specify input and output paths
        inputfile = "./toy2.ded"
        expected_iapyx_dm_path = "./toy2_breaking_example_sync_assumption.olg"

        # get argDict
        argDict = self.getArgDict(inputfile)
        argDict["settings"] = "./settings_sync_assumption.ini"

        self.comparison_workflow(argDict, inputfile, expected_iapyx_dm_path,
                                 None)

    ########################
    #  DM TOY 2 USE AGGS  #
    ########################
    # tests rewriting the second toy program using agg rewrites
    #@unittest.skip( "c4 illogically calculates a('a',2,2) and domcomp_a('a',2,2). behavior did not occur when using aggregates in next rules." )
    def test_dm_toy2_use_aggs(self):

        # specify input and output paths
        inputfile = "./toy2.ded"
        expected_iapyx_dm_path = "./toy2_breaking_example_use_aggs.olg"

        # get argDict
        argDict = self.getArgDict(inputfile)
        argDict["settings"] = "./settings_use_aggs.ini"

        self.comparison_workflow(argDict, inputfile, expected_iapyx_dm_path,
                                 None)

    #########################
    #  COMPARISON WORKFLOW  #
    #########################
    # defines iapyx dm comparison workflow
    def comparison_workflow(self, argDict, inputfile, expected_iapyx_dm_path,
                            expected_eval_path):

        # --------------------------------------------------------------- #
        # testing set up.

        if os.path.isfile("./IR.db"):
            logging.debug("  COMPARISON WORKFLOW : removing rogue IR file.")
            os.remove("./IR.db")

        testDB = "./IR.db"
        IRDB = sqlite3.connect(testDB)
        cursor = IRDB.cursor()

        # --------------------------------------------------------------- #
        # reset counters for new test

        dedt.globalCounterReset()

        # --------------------------------------------------------------- #
        # runs through function to make sure it finishes with expected error

        # run translator
        programData = dedt.translateDedalus(argDict, cursor)

        # portray actual output program lines as a single string
        iapyx_results = self.getActualResults(programData[0])

        if self.PRINT_STOP:
            print iapyx_results
            sys.exit("print stop.")

        # ========================================================== #
        # IAPYX COMPARISON
        #
        # grab expected output results as a string

        if self.COMPARE_PROGS:
            expected_iapyx_results = None
            with open(expected_iapyx_dm_path, 'r') as expectedFile:
                expected_iapyx_results = expectedFile.read()

            self.assertEqual(iapyx_results, expected_iapyx_results)

        # ========================================================== #
        # EVALUATION COMPARISON

        self.evaluate(programData, expected_eval_path)

        # --------------------------------------------------------------- #
        #clean up testing

        IRDB.close()
        os.remove(testDB)

    ##############
    #  EVALUATE  #
    ##############
    # evaluate the datalog program using some datalog evaluator
    # return some data structure or storage location encompassing the evaluation results.
    def evaluate(self, programData, expected_eval_path):

        noOverlap = False

        results_array = c4_evaluator.runC4_wrapper(programData)

        # ----------------------------------------------------------------- #
        # convert results array into dictionary

        eval_results_dict = tools.getEvalResults_dict_c4(results_array)

        # ----------------------------------------------------------------- #
        # collect all pos/not_ rule pairs

        rule_pairs = self.getRulePairs(eval_results_dict)

        logging.debug("  EVALUATE : rule_pairs = " + str(rule_pairs))

        # ----------------------------------------------------------------- #
        # make sure tables do not overlap

        self.assertFalse(self.hasOverlap(rule_pairs, eval_results_dict))

        # ----------------------------------------------------------------- #
        # make sure dm positive relation results are identical to molly
        # relation results

        if expected_eval_path:

            self.compare_evals(eval_results_dict, expected_eval_path)

    ###################
    #  COMPARE EVALS  #
    ###################
    # compare the actual evaluation results with the
    # expected evaluation results.
    def compare_evals(self, eval_results_dict, expected_eval_path):

        # ----------------------------------------------------------------- #
        # get a dictionary of the expected results

        expected_results_array = []
        fo = open(expected_eval_path)
        for line in fo:
            line = line.rstrip()
            expected_results_array.append(line)
        fo.close()

        expected_eval_results_dict = tools.getEvalResults_dict_c4(
            expected_results_array)

        # ----------------------------------------------------------------- #
        # compare all positive tables (not prov)

        for rel_key in eval_results_dict:

            # ----------------------------------------------------------------- #
            # skip not_ rules, _prov rules, adom_ rules

            if rel_key.startswith( "not_" ) or \
               rel_key.startswith( "domcomp_" ) or \
               rel_key.startswith( "dom_" ) or \
               rel_key == "adom_string" or \
               rel_key == "adom_int" or \
               "_prov" in rel_key or \
               "_agg" in rel_key :

                pass

            # ----------------------------------------------------------------- #

            else:

                actual_eval = eval_results_dict[rel_key]
                expected_eval = expected_eval_results_dict[rel_key]

                flag = True
                for expected_row in expected_eval:
                    if not expected_row in actual_eval:
                        logging.debug(
                            "  COMPARE_EVALS : missing row : relation = " +
                            rel_key + "\nexpected_row = " + str(expected_row) +
                            "\nactual_eval = " + str(actual_eval))
                        flag = False
                        break

                self.assertTrue(flag)

    #################
    #  HAS OVERLAP  #
    #################
    # make sure pos and not_pos tables do not overlap
    def hasOverlap(self, rule_pairs, eval_results_dict):

        for pair in rule_pairs:

            logging.debug("  HAS OVERLAP : pair = " + str(pair))

            pos_results = eval_results_dict[pair[0]]
            not_results = eval_results_dict[pair[1]]

            #      # check is too strong
            #      logging.debug( "  HAS OVERLAP : pos_results :" )
            #      for r in pos_results :
            #        logging.debug( r )
            #
            #      logging.debug( "  HAS OVERLAP : not_results :" )
            #      for r in not_results :
            #        logging.debug( r )
            #
            #      if len( pos_results ) > 0 :
            #        nonEmpty_pos = True
            #      else :
            #        nonEmpty_pos = False
            #      self.assertTrue( nonEmpty_pos )
            #
            #      if len( not_results ) > 0 :
            #        nonEmpty_not = True
            #      else :
            #        nonEmpty_not = False
            #      self.assertTrue( nonEmpty_not )

            for pos_row in pos_results:
                if pos_row in not_results:
                    logging.debug("HAS OVERLAP : pos_row '" + str(pos_row) +
                                  "' is in not_results:")
                    for not_row in not_results:
                        logging.debug("HAS OVERLAP : not_row " + str(not_row))
                    return True

        return False

    ####################
    #  GET RULE PAIRS  #
    ####################
    # grab all pos/not_ rule pairs
    def getRulePairs(self, eval_results_dict):

        pair_list = []

        # pull out positive names
        for relName1 in eval_results_dict:

            if not relName1.startswith("not_") and not "_prov" in relName1:

                for relName2 in eval_results_dict:

                    if not relName1 == relName2:
                        if relName2.startswith(
                                "not_") and relName2[4:] == relName1:
                            pair_list.append([relName1, relName2])

        return pair_list

    ##################
    #  MATCH EXISTS  #
    ##################
    # check if the iapyx_rule appears in the molly_line_array
    def matchExists(self, iapyx_rule, molly_line_array):

        logging.debug(
            "-------------------------------------------------------------")
        logging.debug("  MATCH EXISTS : iapyx_rule        = " + iapyx_rule)

        iapyx_goalName = self.getGoalName(iapyx_rule)
        iapyx_goalAttList = self.getGoalAttList(iapyx_rule)
        iapyx_body = self.getBody(iapyx_rule)

        logging.debug("  MATCH EXISTS : iapyx_goalName    = " + iapyx_goalName)
        logging.debug("  MATCH EXISTS : iapyx_goalAttList = " +
                      str(iapyx_goalAttList))
        logging.debug("  MATCH EXISTS : iapyx_body        = " + iapyx_body)

        for line in molly_line_array:

            if self.isRule(line):

                molly_goalName = self.getGoalName(line)
                molly_goalAttList = self.getGoalAttList(line)
                molly_body = self.getBody(line)

                logging.debug("  MATCH EXISTS : molly_goalName    = " +
                              molly_goalName)
                logging.debug("  MATCH EXISTS : molly_goalAttList = " +
                              str(molly_goalAttList))
                logging.debug("  MATCH EXISTS : molly_body        = " +
                              molly_body)

                # goal names and bodies match
                if self.sameName(iapyx_goalName, molly_goalName):

                    logging.debug("  MATCH EXISTS : identical goalNames.")

                    if self.sameBodies(iapyx_body, molly_body):

                        logging.debug("  MATCH EXISTS : identical goalNames.")

                        # make sure all iapyx atts appear in the molly att list
                        iapyx_match = False
                        for iapyx_att in iapyx_goalAttList:
                            if iapyx_att in molly_goalAttList:
                                iapyx_match = True

                        # make sure all molly atts appear in the iapyx att list
                        molly_match = False
                        for molly_att in molly_goalAttList:
                            if molly_att in iapyx_goalAttList:
                                molly_match = True

                        if iapyx_match or molly_match:
                            logging.debug("  MATCH EXISTS : returning True")
                            return True

                    else:
                        logging.debug(
                            "  MATCH EXISTS : different bodies : iapyx_body = "
                            + iapyx_body + ", molly_body = " + molly_body)

                else:
                    logging.debug(
                        "  MATCH EXISTS : different goalNames (sans _prov# appends) : iapyx_goalName = "
                        + iapyx_goalName + ", molly_goalName = " +
                        molly_goalName)

        logging.debug("  MATCH EXISTS : returning False")
        return False

    #################
    #  SAME BODIES  #
    #################
    # separate subgoals and eqns in to separate lists.
    # make sure all elements appear in both lists.
    def sameBodies(self, body1, body2):

        # compare eqn lists
        eqnList1 = self.getEqnList(body1)
        eqnList2 = self.getEqnList(body2)

        if len(eqnList1) == len(eqnList2):
            eqnListLen = True
        else:
            eqnListLen = False

        sameEqns = False
        if eqnList1 == eqnList2:
            sameEqns = True
        else:
            for e1 in eqnList1:
                if e1 in eqnList2:
                    sameEqns = True

        # compare subgoal lists
        subgoalList1 = self.getSubgoalList(body1, eqnList1)
        subgoalList2 = self.getSubgoalList(body2, eqnList2)

        if len(subgoalList1) == len(subgoalList2):
            subListLen = True
        else:
            subListLen = False

        sameSubgoals = False
        for e1 in subgoalList1:
            if e1 in subgoalList2:
                sameSubgoals = True

        logging.debug("  SAME BODIES : eqnList1     = " + str(eqnList1))
        logging.debug("  SAME BODIES : eqnList2     = " + str(eqnList2))
        logging.debug("  SAME BODIES : subgoalList1 = " + str(subgoalList1))
        logging.debug("  SAME BODIES : subgoalList2 = " + str(subgoalList2))
        logging.debug("  SAME BODIES : subListLen   = " + str(subListLen))
        logging.debug("  SAME BODIES : sameSubgoals = " + str(sameSubgoals))
        logging.debug("  SAME BODIES : eqnListLen   = " + str(eqnListLen))
        logging.debug("  SAME BODIES : sameEqns     = " + str(sameEqns))

        if subListLen and sameSubgoals and eqnListLen and sameEqns:
            return True
        else:
            return False

    ######################
    #  GET SUBGOAL LIST  #
    ######################
    # extract the list of subgoals from the given rule body
    def getSubgoalList(self, body, eqnList):

        # ========================================= #
        # replace eqn instances in line
        for eqn in eqnList:
            body = body.replace(eqn, "")

        body = body.replace(",,", ",")

        # ========================================= #
        # grab subgoals

        # grab indexes of commas separating subgoals
        indexList = self.getCommaIndexes(body)

        #print indexList

        # replace all subgoal-separating commas with a special character sequence
        tmp_body = ""
        for i in range(0, len(body)):
            if not i in indexList:
                tmp_body += body[i]
            else:
                tmp_body += "___SPLIT___HERE___"
        body = tmp_body

        # generate list of separated subgoals by splitting on the special
        # character sequence
        subgoalList = body.split("___SPLIT___HERE___")

        # remove empties
        tmp_subgoalList = []
        for sub in subgoalList:
            if not sub == "":
                tmp_subgoalList.append(sub)
        subgoalList = tmp_subgoalList

        return subgoalList

    ##################
    #  GET EQN LIST  #
    ##################
    # extract the list of equations in the given rule body
    def getEqnList(self, body):

        body = body.split(",")

        # get the complete list of eqns from the rule body
        eqnList = []
        for thing in body:
            if self.isEqn(thing):
                eqnList.append(thing)

        return eqnList

    #######################
    #  GET COMMA INDEXES  #
    #######################
    # given a rule body, get the indexes of commas separating subgoals.
    def getCommaIndexes(self, body):

        underscoreStr = self.getCommaIndexes_helper(body)

        indexList = []
        for i in range(0, len(underscoreStr)):
            if underscoreStr[i] == ",":
                indexList.append(i)

        return indexList

    ##############################
    #  GET COMMA INDEXES HELPER  #
    ##############################
    # replace all paren contents with underscores
    def getCommaIndexes_helper(self, body):

        # get the first occurring paren group
        nextParenGroup = "(" + re.search(r'\((.*?)\)', body).group(1) + ")"

        # replace the group with the same number of underscores in the body
        replacementStr = ""
        for i in range(0, len(nextParenGroup) - 2):
            replacementStr += "_"
        replacementStr = "_" + replacementStr + "_"  # use underscores to replace parentheses

        body = body.replace(nextParenGroup, replacementStr)

        # BASE CASE : no more parentheses
        if not "(" in body:
            return body

        # RECURSIVE CASE : yes more parentheses
        else:
            return self.getCommaIndexes_helper(body)
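
    # For intuition, a hypothetical worked example of the masking above:
    #   "a(X,Y),b(Y,Z)"  ->  "a_____,b_____"
    # after which getCommaIndexes() reports index 6, the position of the
    # single subgoal-separating comma.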

    ############
    #  IS EQN  #
    ############
    # check if input contents from the rule body is an equation
    def isEqn(self, sub):

        flag = False
        for op in eqnOps:
            if op in sub:
                flag = True

        return flag

    ###############
    #  SAME NAME  #
    ###############
    # extract the core name, without the '_prov' append, and compare
    # if rule name is an _vars#_prov, cut off at the end of the _vars append
    def sameName(self, name1, name2):

        # extract the core name for the first input name
        if self.isAggsProvRewrite(name1):
            endingStr = re.search('(.*)_prov(.*)', name1)
            coreName1 = name1.replace(endingStr.group(1), "")
        elif self.isProvRewrite(name1):
            coreName1 = name1.split("_prov")
            coreName1 = coreName1[:-1]
            coreName1 = "".join(coreName1)
        else:
            coreName1 = name1

        # extract the core name for the second input name
        if self.isAggsProvRewrite(name2):
            endingStr = re.search('(.*)_prov(.*)', name2)
            coreName2 = name2.replace(endingStr.group(1), "")
        elif self.isProvRewrite(name2):
            coreName2 = name2.split("_prov")
            coreName2 = coreName2[:-1]
            coreName2 = "".join(coreName2)
        else:
            coreName2 = name2

        logging.debug("  SAME NAME : coreName1 = " + coreName1)
        logging.debug("  SAME NAME : coreName2 = " + coreName2)

        if coreName1 == coreName2:
            logging.debug("  SAME NAME : returning True")
            return True
        else:
            logging.debug("  SAME NAME : returning False")
            return False

    ##########################
    #  IS AGGS PROV REWRITE  #
    ##########################
    # check if the input relation name is indicative of an aggregate provenance rewrite
    def isAggsProvRewrite(self, relationName):

        middleStr = re.search('_vars(.*)_prov', relationName)

        if middleStr:
            if middleStr.group(1).isdigit():
                if relationName.endswith(middleStr.group(1)):
                    return True
                else:
                    return False
            else:
                return False
        else:
            return False

    #####################
    #  IS PROV REWRITE  #
    #####################
    # check if the input relation name is indicative of a provenance rewrite
    def isProvRewrite(self, relationName):

        endingStr = re.search('_prov(.*)', relationName)

        if endingStr:
            if endingStr.group(1).isdigit():
                if relationName.endswith(endingStr.group(1)):
                    return True
                else:
                    return False
            else:
                return False
        else:
            return False

    ###################
    #  GET GOAL NAME  #
    ###################
    # extract the goal name from the input rule.
    def getGoalName(self, rule):

        logging.debug("  GET GOAL NAME : rule     = " + rule)

        goalName = rule.split("(", 1)
        goalName = goalName[0]

        logging.debug("  GET GOAL NAME : goalName = " + goalName)
        return goalName

    #######################
    #  GET GOAL ATT LIST  #
    #######################
    # extract the goal attribute list.
    def getGoalAttList(self, rule):

        attList = rule.split(")", 1)
        attList = attList[0]
        attList = attList.split("(", 1)
        attList = attList[1]
        attList = attList.split(",")

        return attList

    ##############
    #  GET BODY  #
    ##############
    # extract the rule body.
    def getBody(self, rule):

        body = rule.split(":-")
        body = body[1]

        return body

    #############
    #  IS RULE  #
    #############
    # check if input program line denotes a rule
    def isRule(self, line):
        if ":-" in line:
            return True
        else:
            return False

    ###############
    #  GET ERROR  #
    ###############
    # extract error message from system info
    def getError(self, sysInfo):
        return str(sysInfo[1])

    ########################
    #  GET ACTUAL RESULTS  #
    ########################
    def getActualResults(self, programLines):
        program_string = "\n".join(programLines)
        program_string += "\n"  # add extra newline to align with read() parsing
        return program_string

    ##################
    #  GET ARG DICT  #
    ##################
    def getArgDict(self, inputfile):

        # initialize
        argDict = {}

        # populate with unit test defaults
        argDict['prov_diagrams'] = False
        argDict['use_symmetry'] = False
        argDict['crashes'] = 0
        argDict['solver'] = None
        argDict['disable_dot_rendering'] = False
        argDict['settings'] = "./settings_dm.ini"
        argDict['negative_support'] = False
        argDict['strategy'] = None
        argDict['file'] = inputfile
        argDict['EOT'] = 4
        argDict['find_all_counterexamples'] = False
        argDict['nodes'] = ["a", "b", "c"]
        argDict['evaluator'] = "c4"
        argDict['EFF'] = 2

        return argDict
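
A conventional entry point for running the test class directly is not shown in the excerpt; standard unittest usage would be:

if __name__ == "__main__":
    unittest.main()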