Example #1
    def test_answer(self):
        self.logger.info("Test Student Answer Analysis")

        testStandardAnswerFile = "ans_Q1.txt"
        stdFilePath = os.path.join("algo/testdata/raw/Q1",
                                   testStandardAnswerFile)
        self.logger.info("stdanswer filepath:%s" % stdFilePath)
        if not os.path.isfile(stdFilePath):
            self.logger.error("Standard Test file doesn't exist:%s" %
                              testStandardAnswerFile)
            assert False
        fh = open(stdFilePath, "r")
        stdtext = fh.read()
        fh.close()

        sinst = Standard()
        pointlist, textfdist, slist = sinst.Analysis(stdtext)
        std_pointlist_no = [point['Point_No'] for point in pointlist]
        self.logger.info("Points:%s" % std_pointlist_no)

        testAnswerFile = "ans_Q1.txt"
        ansFilePath = os.path.join("algo/testdata/raw/Q1", testAnswerFile)
        self.logger.info("answer filepath:%s" % ansFilePath)
        if not os.path.isfile(ansFilePath):
            self.logger.error("Answer file doesn't exist:%s" % testAnswerFile)
            assert False
        fh = open(ansFilePath, "r")
        anstext = fh.read()
        fh.close()

        mockrulelist = [
            {'Mark': 10, 'Point': ['P1.1', 'P1.2', 'P1.3', 'P2', 'P3', 'P4', 'P5']},
            {'Mark': 7, 'Point': ['P1.1', 'P2', 'P3', 'P4', 'P5']},
            {'Mark': 6, 'Point': ['P1.1', 'P2', 'P3', 'P4']},
            {'Mark': 5, 'Point': ['P1.1', 'P2', 'P3']},
            {'Mark': 3, 'Point': ['P1.1', 'P2']},
            {'Mark': 2, 'Point': ['P1.1']},
        ]
        pprint.pprint(mockrulelist)

        ans = Answer()
        mark, marklist, omitted = ans.Analysis(anstext, textfdist, slist,
                                               pointlist, mockrulelist)
        pprint.pprint(mark)
        pprint.pprint(omitted)
        self.logger.info("Test Student Answer Analysis Finished")
Example #2
 def get_student_text_distribution(anstext, std_textfdist):
     debug_print(
         "Note: Deriving alternative global frequency distribution (from student text) for use with Answer.Analysis()"
     )
     sinst = Standard()
     stu_pointlist, stu_textfdist, stu_slist = sinst.Analysis(anstext)
     debug_print("\tstandard dist: " + str(std_textfdist), level=4)
     debug_print("\tstudent dist: " + str(stu_textfdist), level=4)
     return stu_textfdist
Example #3
    def __traversal_process(self, testdir):
        ans = Answer()
        for root, dirs, files in os.walk(testdir):
            if 'Standard' in dirs:
                dirs.remove('Standard')
            for stdfile in files:
                # Check for answer file (e.g., "ans_q8.txt")
                if 'ans' in stdfile:
                    testno = stdfile[4:-4]
                    self.logger.info("no:%s" % testno)
                    stdPath = os.path.join(root, stdfile)
                    if not os.path.isfile(stdPath):
                        self.logger.error("Test file doesn't exist:%s" %
                                          stdfile)
                        assert False
                    fh = open(stdPath, "r")
                    filetext = fh.read()
                    fh.close()
                    sinst = Standard()
                    pointlist, textfdist, slist = sinst.Analysis(filetext)

                    # Check schema file (e.g., "scheme_q8.txt")
                    schemename = 'scheme_' + testno + '.txt'
                    schemepath = os.path.join(root, schemename)
                    fr = open(schemepath, 'r')
                    scheme = self.__parsescheme(fr.read())
                    fr.close()
                    rulelist = self.__updaterulelist(scheme, pointlist)
                    print("ansfile\tmark\tmarklist")
                    for idx in range(0, 10):
                        # Check student response file (e.g., "stud9_q8.txt")
                        ansfile = 'stud' + str(idx + 1) + '_' + testno + '.txt'
                        ansPath = os.path.join(root, ansfile)
                        if os.path.isfile(ansPath):
                            fa = open(ansPath, 'r')
                            anstext = fa.read()
                            fa.close()
                            if anstext:
                                # TODO: Always use freq dist for student text (not standard)
                                if (USE_STUDENT_TEXT_DIST):
                                    textfdist = get_student_text_distribution(
                                        anstext, textfdist)
                                debug_print("Calling ans.Analysis%s" % str(
                                    (anstext, textfdist, slist, pointlist,
                                     rulelist)),
                                            level=4)
                                mark, marklist, omitted = ans.Analysis(
                                    anstext, textfdist, slist, pointlist,
                                    rulelist)
                            else:
                                mark = 0
                                marklist = []
                            print("%s\t%d\t%s" % (ansfile, mark, marklist))
Example #4
def check_files():

    n_assertion_err = 0
    for data_file in get_data_files(DATA_DIR):
        try:
            execute_algorithm(Standard(), data_file)
        except AssertionError:
            n_assertion_err += 1
    print(n_assertion_err)
Example #5
 def test_standard(self):
     self.logger.info("Test Standard Answer Analysis")
     testStandardAnswerFile = "ans_Q1.txt"
     filePath = os.path.join("algo/testdata/raw/Q1", testStandardAnswerFile)
     self.logger.info("filepath:%s" % filePath)
     if not os.path.isfile(filePath):
         self.logger.error("Test file doesn't exist:%s" %
                           testStandardAnswerFile)
         assert False
     fh = open(filePath, "r")
     filetext = fh.read()
     fh.close()
     sinst = Standard()
     pointlist, textfdist, slist = sinst.Analysis(filetext)
     #for word,freq in textfdist.items():
     #    print "%s:%d" % (word,freq)
     pprint.pprint(slist)
     self.logger.info("Test Standard Answer Analysis finished")
Example #6
 def __traversal_process(self, testdir):
     ans = Answer()
     for root, dirs, files in os.walk(testdir):
         if 'Standard' in dirs:
             dirs.remove('Standard')
         for stdfile in files:
             if 'ans' in stdfile:
                 testno = stdfile[4:-4]
                 self.logger.info("no:%s" % testno)
                 stdPath = os.path.join(root, stdfile)
                 if not os.path.isfile(stdPath):
                     self.logger.error("Test file doesn't exist:%s" %
                                       stdfile)
                     assert False
                 fh = open(stdPath, "r")
                 filetext = fh.read()
                 fh.close()
                 sinst = Standard()
                 pointlist, textfdist, slist = sinst.Analysis(filetext)
                 schemename = 'scheme_' + testno + '.txt'
                 schemepath = os.path.join(root, schemename)
                 fr = open(schemepath, 'r')
                 scheme = self.__parsescheme(fr.read())
                 fr.close()
                 rulelist = self.__updaterulelist(scheme, pointlist)
                 for idx in range(0, 10):
                     ansfile = 'stud' + str(idx + 1) + '_' + testno + '.txt'
                     ansPath = os.path.join(root, ansfile)
                     if os.path.isfile(ansPath):
                         fa = open(ansPath, 'r')
                         anstext = fa.read()
                         fa.close()
                         if anstext:
                             mark, marklist, omitted = ans.Analysis(
                                 anstext, textfdist, slist, pointlist,
                                 rulelist)
                         else:
                             mark = 0
                             marklist = []
                         print("%s\t%d\t%s" % (ansfile, mark, marklist))
Example #7
 def getBackend(self, string, portList):
     if string == "standard":
         return Standard(self.st, self.vs, portList)
     elif string == "yarp":
         return Yarp(self.st, self.vs, portList)
     elif string == "zeromq":
         return ZeroMQ(self.st, self.vs, portList)
     elif string == "nodejs":
         return NodeJS(self.st, self.vs, portList)
     elif string == "ros":
         return Ros(self.st, self.vs, portList)
     else:
         raise Exception("Invalid backend: \"" + string + "\"")
Example #8
 def test_standard(self):
     self.logger.info("Test Standard Answer Analysis")
     testStandardAnswerFile = "ans_Q1.txt"
     filePath = os.path.join("algo/testdata/raw/Q1", testStandardAnswerFile)
     self.logger.info("filepath:%s" % filePath)
     if not os.path.isfile(filePath):
         self.logger.error("Test file doesn't exist:%s" %
                           testStandardAnswerFile)
         assert False
     fh = open(filePath, "r")
     filetext = fh.read()
     fh.close()
     sinst = Standard()
     pointlist, textfdist, slist = sinst.Analysis(filetext)
     if __debug__:
         print "Word frequencies"
         for word, freq in textfdist.items():
             print "%s:%d" % (word, freq)
     pprint.pprint(slist)
     if __debug__ and sinst.apply_grammar_checking:
         print("standard critique: %s" % sinst.critique_results)
     self.logger.info("Test Standard Answer Analysis finished")
Example #9
def check_files():
    """
    Runs the Standar genetic algorithm with all the files and checks for
    AssertionErrors, that happens when the files are not well-structured
    Corret structure: Problem size, flow matrix and distance matrix with
    size equal to the problem size
    Current erros: 19
    """
    n_assertion_err = 0
    for data_file in get_data_files(DATA_DIR):
        try:
            execute_algorithm(Standard(), data_file)
        except AssertionError:
            print("=========== AssertionError exception on file", data_file)
            n_assertion_err += 1
    print(n_assertion_err)
Example #10
def main():
    global collection
    with open('cards.collectible.json', encoding='utf8') as fh:
        cards = json.load(fh)
    standard_lst = [
        c for c in cards
        if (c['set'] in
            ['GILNEAS', 'LOOTAPALOOZA', 'EXPERT1', 'ICECROWN', 'UNGORO'])
    ]

    standard = Standard(standard_lst)
    runs = 100
    total = 0
    for i in range(0, runs):
        collection = Collection(standard)
        counter = 0
        while collection.dust_remaining() > 0:
            generate_pack(standard)
            counter += 1

        total += counter
        print(f'{i}/{runs}\t\t', end='\r')

    print(f'Average: {total/runs}')
    print(f'Cost: ${((total/runs)/60)*88:.2f} CAD')
Example #11
    def parse_Q1(self):
        debug_print("parse_Q1()", level=4)
        # Read in the correct answer to first question
        testStandardAnswerFile = "ans_Q1.txt"
        filePath = os.path.join("algo/testdata/raw/Q1", testStandardAnswerFile)
        self.logger.info("filepath:%s" % filePath)
        if not os.path.isfile(filePath):
            self.logger.error("Test file doesn't exist:%s" %
                              testStandardAnswerFile)
            assert False
        debug_print("Processing standard file '%s'" % filePath, level=3)
        fh = open(filePath, "r")
        filetext = fh.read()
        fh.close()

        # Check the text for optional annotations and isolate
        if CHECK_LINKAGES:
            self.standard_annotations = Annotations()
            self.standard_annotations.extract_annotations(filetext)
            filetext = self.standard_annotations.text_proper

        # Create the appropriate class instance for Standard
        # TODO: Remove abAnswer method overrides altogether and do everything via proper subclassing (RTFM!!!).
        ## OLD: sinst = abStandard()
        sinst = abStandard() if USE_OVERRIDES else Standard()

        # Perform text processing analysis over sentence and return result along with some mocked up rules
        pointlist, textfdist, slist = sinst.Analysis(filetext)
        rulelist = [
            {'Mark': 10, 'Point': ['P1.1', 'P1.2', 'P2', 'P3', 'P4', 'P5', 'P6']},
            {'Mark': 9, 'Point': ['P2', 'P1.1', 'P6', 'P4', 'P5']},
            {'Mark': 9, 'Point': ['P2', 'P1.2', 'P6', 'P4', 'P5']},
            {'Mark': 9, 'Point': ['P2', 'P3', 'P6', 'P4', 'P5']},
            {'Mark': 9, 'Point': ['P2', 'P3', 'P6', 'P4', 'P5', 'P1.2']},
            {'Mark': 9, 'Point': ['P2', 'P3', 'P6', 'P4', 'P5', 'P1.1']},
            {'Mark': 9, 'Point': ['P3', 'P6', 'P4', 'P5', 'P1.2', 'P1.1']},
            {'Mark': 8, 'Point': ['P3', 'P6', 'P4', 'P5']},
            {'Mark': 7, 'Point': ['P2', 'P6', 'P4', 'P5', 'P1.2', 'P1.1']},
            {'Mark': 6, 'Point': ['P6', 'P4', 'P5']},
            {'Mark': 5, 'Point': ['P2', 'P3', 'P6', 'P5', 'P1.2', 'P1.1']},
            {'Mark': 5, 'Point': ['P2', 'P3', 'P6', 'P4', 'P1.2', 'P1.1']},
            {'Mark': 5, 'Point': ['P2', 'P3', 'P4', 'P5', 'P1.2', 'P1.1']},
            {'Mark': 4, 'Point': ['P2', 'P3', 'P1.1', 'P4', 'P1.2']},
            {'Mark': 4, 'Point': ['P2', 'P3', 'P1.1', 'P1.2', 'P5']},
            {'Mark': 4, 'Point': ['P2', 'P3', 'P1.1', 'P6', 'P1.2']},
            {'Mark': 3, 'Point': ['P2', 'P3', 'P1.1', 'P1.2']},
            {'Mark': 2, 'Point': ['P3', 'P1.2']},
            {'Mark': 2, 'Point': ['P3', 'P1.1']},
            {'Mark': 2, 'Point': ['P1.2', 'P1.1']},
            {'Mark': 1, 'Point': ['P1.1']},
            {'Mark': 1, 'Point': ['P1.2']},
            {'Mark': 1, 'Point': ['P2']},
            {'Mark': 1, 'Point': ['P3']},
        ]
        return pointlist, textfdist, slist, rulelist
Example #12
    def test_answer(self):
        self.logger.info("Test Student Answer Analysis")

        # Read in the correct answer to first question
        # TODO: Create helper function for reading question info as same code sequence used elsewhere.
        testStandardAnswerFile = "ans_Q1.txt"
        stdFilePath = os.path.join("algo/testdata/raw/Q1",
                                   testStandardAnswerFile)
        self.logger.info("stdanswer filepath:%s" % stdFilePath)
        if not os.path.isfile(stdFilePath):
            self.logger.error("Standard Test file doesn't exist:%s" %
                              testStandardAnswerFile)
            assert False
        fh = open(stdFilePath, "r")
        stdtext = fh.read()
        fh.close()

        # Perform text processing analysis over correct answer
        sinst = Standard()
        pointlist, textfdist, slist = sinst.Analysis(stdtext)
        std_pointlist_no = [point['Point_No'] for point in pointlist]
        self.logger.info("Points:%s" % std_pointlist_no)

        # Read in the standard answer again as if it were a student answer
        # TODO: Just do an assignment for crying out loud! Such needless code repetition!
        # ex: anstext = stdtext
        testAnswerFile = "ans_Q1.txt"
        ansFilePath = os.path.join("algo/testdata/raw/Q1", testAnswerFile)
        self.logger.info("answer filepath:%s" % ansFilePath)
        if not os.path.isfile(ansFilePath):
            self.logger.error("Answer file doesn't exist:%s" % testAnswerFile)
            assert False
        fh = open(ansFilePath, "r")
        anstext = fh.read()
        fh.close()

        # Create some dummy grading rules
        mockrulelist = [
            {'Mark': 10, 'Point': ['P1.1', 'P1.2', 'P1.3', 'P2', 'P3', 'P4', 'P5']},
            {'Mark': 7, 'Point': ['P1.1', 'P2', 'P3', 'P4', 'P5']},
            {'Mark': 6, 'Point': ['P1.1', 'P2', 'P3', 'P4']},
            {'Mark': 5, 'Point': ['P1.1', 'P2', 'P3']},
            {'Mark': 3, 'Point': ['P1.1', 'P2']},
            {'Mark': 2, 'Point': ['P1.1']},
        ]
        pprint.pprint(mockrulelist)

        # Create the answer class instance and optionally override global frequency distribution from answer text.
        # TODO: Always use freq dist for student text (not standard).
        ans = Answer()
        if (USE_STUDENT_TEXT_DIST):
            textfdist = get_student_text_distribution(anstext, textfdist)

        # Preprocess the student answer and then compare resulting vectors against standard
        # TODO: Raise an exception if the result is not as expected
        mark, marklist, omitted = ans.Analysis(anstext, textfdist, slist,
                                               pointlist, mockrulelist)
        pprint.pprint(mark)
        pprint.pprint(omitted)
        self.logger.info("Test Student Answer Analysis Finished")
Example #13
 def __init__(self):
     debug_print(
         "Warning: Using shameless hack (abStandard testing class): FIX ME!"
     )
     Standard.__init__(self)
Example #14
from auxiliary import *

from standard import Standard
from baldwinian import Baldwinian
from lamarckian import Lamarckian


standard   = Standard()
baldwinian = Baldwinian()
lamarckian = Lamarckian()


if __name__ == '__main__':

    elapsed_time = execute_algorithm( standard, 'tai256c.dat' ) # bur26a
    print("Standard executing time: {:.3f}s\n".format(elapsed_time))

    # elapsed_time = execute_algorithm( baldwinian, 'tai256c.dat' )
    # print("Baldwinian executing time: {:.3f}s\n".format(elapsed_time))

    # elapsed_time = execute_algorithm( lamarckian, 'bur26a.dat' )
    # print("Lamarckian executing time: {:.3f}s\n".format(elapsed_time))
Example #15
from AUXX import *
from standard import Standard

st = Standard()

if __name__ == '__main__':

    time = execute_algorithm(st, 'tai256c.dat')
    print("Executing time of Standard: {:.3f}s\n".format(time))
Example #16
 def __init__(self, learning_rule=Standard(), use_theano_scan=False):
     self._learning_rule = learning_rule
     self._use_theano_scan = use_theano_scan
Example #17
 def __init__(self, paramClass, paramDict):
     Standard.__init__(self, paramClass, paramDict)
     self.permutations = paramDict['permutations']
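
For reference, the subclass initializer in Example #17 can also be written with super(); a sketch assuming Python 3 and the same (paramClass, paramDict) signature, with a hypothetical subclass name:

class PermutationVariant(Standard):  # hypothetical name for the subclass in Example #17
    def __init__(self, paramClass, paramDict):
        super().__init__(paramClass, paramDict)
        self.permutations = paramDict['permutations']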