Example no. 1
 def test_fuzzy_match(self):
   """Tests that arrays containing different elements, where neither is
   a slice of the other, match if their elements are numerically close
   together."""
   self.assertTrue(comparator.compare(self.arr1, self.arr3,
                                      MFCC_MATCH_THRESHOLD))
   self.assertTrue(comparator.compare(self.arr3, self.arr1,
                                      MFCC_MATCH_THRESHOLD))
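A hedged illustration, not code from the examples: the MFCC tests in examples 1, 16, 19 and 23 describe a comparator that accepts exact matches, ordered partial matches and numerically close arrays, and rejects arrays whose values are far apart. A minimal sketch with those semantics, assuming NumPy arrays of MFCC frames; only the name compare and its threshold argument come from the tests, everything else is an assumption.

import numpy as np

def compare(a, b, threshold):
    # Slide the shorter array over the longer one; the pair matches if some
    # alignment keeps the mean absolute difference below the threshold.
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    short, long_ = (a, b) if len(a) <= len(b) else (b, a)
    for start in range(len(long_) - len(short) + 1):
        window = long_[start:start + len(short)]
        if np.mean(np.abs(window - short)) < threshold:
            return True
    return False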
Example no. 2
    def execute(self):
        """
        Execute the Test Case
        """

        logger.info('Executing Test Case {}'.format(self.name))
        source_writer = ResultWriter(self.name, 'source')
        target_writer = ResultWriter(self.name, 'target')
        source_records = self.source_connection.execute_query(
            self.source_query, source_writer.write, self.sort_source)
        target_records = self.target_connection.execute_query(
            self.target_query, target_writer.write, self.sort_target)

        source_mismatch, target_mismatch = comparator.compare(
            source_records,
            target_records,
            max_mismatch_size=self.max_mismatch_size)
        test_result = TestResult(self.name, self.source_query,
                                 self.target_query, source_mismatch,
                                 target_mismatch)

        source_writer.close()
        target_writer.close()
        logger.info('Executed Test Case {}'.format(self.name))
        return test_result
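A hedged sketch of what a comparator.compare with this call shape might do: the two return values and the max_mismatch_size keyword are taken from the call above, while the set-based implementation and the assumption that records are hashable are illustrative only.

def compare(source_records, target_records, max_mismatch_size=None):
    # Rows present on one side only; assumes records are hashable (e.g. tuples).
    source_set, target_set = set(source_records), set(target_records)
    source_mismatch = list(source_set - target_set)
    target_mismatch = list(target_set - source_set)
    if max_mismatch_size is not None:
        # Cap the amount of mismatch data carried into the TestResult.
        source_mismatch = source_mismatch[:max_mismatch_size]
        target_mismatch = target_mismatch[:max_mismatch_size]
    return source_mismatch, target_mismatch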
Example no. 3
def run_testcase(i, exec_file, username, qid, user_attempts, JS):
    in_file = "{}/question{}/input{}.txt".format(input_dir, qid, i + 1)
    in_file_fd = open(in_file, "r")
    user_out = '{}/{}/question{}/output{}{}.txt'.format(
        path, username, qid, user_attempts, i + 1)
    user_out_fd = os.open(user_out,
                          os.O_RDWR | os.O_CREAT)  # user output after running
    des_file = "{}/{}/{}.txt".format(des_dir, qid,
                                     str(i))  # descrption file memory and time
    des_fd = open(des_file, "r")
    lines = des_fd.readlines()
    time = lines[0].strip()
    mem = lines[1].strip()  # memory
    des_fd.close()

    res = sandy_func(exec_file, in_file_fd, user_out_fd, time,
                     mem)  # result code of the sandboxed run
    in_file_fd.close()

    os.close(user_out_fd)
    expected_out = "{}/question{}/expected_output{}.txt".format(
        output_dir, qid, i + 1)

    if res == 1:  # if the run succeeded, compare outputs
        res = compare(i, user_out, expected_out, JS)

    return res
Example no. 4
def run_testcase(i, exec_file, uid, qid):
    in_file = input_dir + "/" + str(qid) + "/" + str(
        i) + ".in"  # standard input for running

    in_file_fd = open(in_file, "r")
    user_out = user_dir + "/" + str(uid) + "/" + str(qid) + "/" + str(
        i) + ".uout"

    user_out_fd = os.open(user_out,
                          os.O_RDWR | os.O_CREAT)  # user output after running
    des_file = description_dir + "/" + str(qid) + "/" + str(
        i) + ".txt"  # description file

    des_fd = open(des_file, "r")
    lines = des_fd.readlines()
    time = lines[0].strip()
    mem = lines[1].strip()
    des_fd.close()

    res = sandy_func(exec_file, in_file_fd, user_out_fd, time, mem)

    in_file_fd.close()

    os.close(user_out_fd)
    actual_out = output_dir + "/" + qid + "/" + str(i) + ".out"

    if (res == 1):
        res = compare(user_out, actual_out)

    # print res
    os.remove(user_out)  # removing user output
    return res
Example no. 5
def run_test(protomol_path, conf_file, pwd):
    tests = 0
    testspassed = 0
    testsfailed = 0
    failedtests = []

    conf_param_overrides = parse_params(conf_file)
    epsilon = conf_param_overrides.get("epsilon", DEFAULT_EPSILON)
    scaling_factor = conf_param_overrides.get("scaling_factor", DEFAULT_SCALINGFACTOR)

    base = os.path.splitext(os.path.basename(conf_file))[0]
    logging.info("Executing Test: " + base)

    cmd = protomol_path[:]
    cmd.append(conf_file)

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode > 0:
        s = "Not able to execute Protomol!\n"
        s += "cmd: " + str(cmd) + "\n"
        s += "stdout: " + stdout + "\n"
        s += "stderr: " + stderr + "\n"
        logging.critical(s)
    expects = []
    outputs = glob.glob("tests/output/" + base + ".*")
    outputtemp = []
    for output in outputs:
        outbase = os.path.basename(output)
        if os.path.exists("tests/expected/" + outbase):
            outputtemp.append(output)
            expects.append("tests/expected/" + outbase)

    outputs = outputtemp

    for i in xrange(len(outputs)):
        tests += 1
        ftype = os.path.splitext(os.path.basename(outputs[i]))[1]

        if ftype in [".dcd", ".header", ".xtc"]:
            continue

        ignoreSign = False

        # Ignore signs on eigenvectors

        if ftype == ".vec":
            ignoreSign = True
        logging.info("Testing: " + expects[i] + " " + outputs[i])

        if comparator.compare(expects[i], outputs[i], epsilon, scaling_factor, ignoreSign):
            logging.info("Passed")
            testspassed += 1
        else:
            logging.warning("Failed")
            testsfailed += 1
            failedtests.append("Comparison of " + expects[i] + " and " + outputs[i])

    return (tests, testspassed, testsfailed, failedtests)
Example no. 6
def graph():
    global xlim
    global show_peaks
    refresh()
    if show_peaks_entry.get() == "0":
        show_peaks = 0
    if not dfs:
        load()
    if lmin_entry.get() and lmax_entry.get():
        xlim = [int(lmin_entry.get()), int(lmax_entry.get())]
    
    meds = entry2.get().split(",")
    meds = [int(i) for i in meds]  
    if title_entry.get():
        compare(meds, dfs, nombres, title=title_entry.get(), xlim=xlim, peaks=int(show_peaks))
    else:
        compare(meds, dfs, nombres, xlim=xlim, peaks=int(show_peaks))
Example no. 7
def menu():

    print(" ")
    a = input("Digite o primeiro número: ")

    if (comparator.validate(a)):
        print("")
        b = input("Digite o segundo número: ")
        if (comparator.validate(b)):
            comparator.compare(a, b)

        else:
            print("")
            print("Digite um número válido.")

    else:
        print(" ")
        print("Digite um número válido.")
Example no. 8
def fileparser(filename, path1, path2, outputpath):
    try:
        parse1 = parse(path1 + '/' + filename)
        parse2 = parse(path2 + '/' + filename)
        merged = compare(parse1, parse2)
        outputxml = generatexml(merged)
        writeonfile(outputxml, outputpath, '/' + filename)

    except Exception as e:
        print(e)
Example no. 9
def stresstest_with_comparator():
    test_n = 1
    while True:
        print(f"TEST #{test_n}")
        test_n += 1
        gen_test(config, test_n)
        res = compare(config)
        if res == 1:
            print('FAIL')
        elif res == 2:
            print('Something went wrong.')
        else:
            print('SUCCESS')
        if res != 0:
            print_test(config)
Example no. 10
 def __init__(self, hatch, queue):
     multiprocessing.Process.__init__(self, name="worker for filtered data")
     self.hatch = hatch
     self.queue = queue
     self.visual = visual.visual()
     self.util = util.util(self.hatch.get('debug'))
     self.analyze = analyze.analyze(self.hatch.get('debug'))
     self.compare = comparator.compare(self.hatch.get('debug'), self.util)
     self.running = True
     self.counter = 0
     self.plot_counter = 0
     self.reset_counter = 0
     self.rawbuf = [ ]
     self.rawfft = [ ]
     self.raw = [ ]
     self.fft = [ ]
     self.word_tendency = None
     self.character = [ ]
     self.raw_character = [ ]
     self.uid = str(uuid.uuid4())
     self.start()
Example no. 11
def executeTests(userobjdir,
                 rmafter=True,
                 onlyobject=False,
                 testexec=lambda dif: print(dif)):

    if not onlyobject:
        testobjdir = readHeader()

    testindex = "1"
    difs = []
    while path.isfile(TESTS_FOLDER + "test" + testindex + ".txt"):
        testvals = readTest(testindex)

        if not path.isdir("renders"):
            mkdir("renders")

        imgorig = "renders/imgorig" + testindex + ".png"
        imguser = "******" + testindex + ".png"

        if not onlyobject:
            load(testobjdir, imgorig, *testvals)
        else:
            imguser = "******" + path.basename(
                userobjdir) + "_test" + testindex + ".png"

        load(userobjdir, imguser, *testvals)

        if not onlyobject:
            dif = compare(imgorig, imguser)
            difs.append(dif)
            testexec(dif)

            if rmafter:
                remove(imgorig)
                remove(imguser)

        testindex = str(int(testindex) + 1)

    return difs
Example no. 12
 def __init__(self, queue, debug, plot, dict, wave):
     multiprocessing.Process.__init__(self, name="worker for filtered data")
     self.queue = queue
     self.debug = debug
     self.plot = plot
     self.dict = dict
     self.wave = wave
     self.visual = visual.visual()
     self.util = util.util(debug)
     self.analyze = analyze.analyze(debug)
     self.compare = comparator.compare(debug, self.util)
     self.running = True
     self.counter = 0
     self.plot_counter = 0
     self.reset_counter = 0
     self.rawbuf = []
     self.rawfft = []
     self.raw = []
     self.fft = []
     self.word_tendency = None
     self.character = []
     self.raw_character = []
     self.uid = str(uuid.uuid4())
     self.start()
Example no. 13
 def test_a_lt_b(self):
     actual = compare(3, 7)
     self.assertEqual(-1, actual)
Example no. 14
def run_test(protomol_path, conf_file, pwd, parallel):
    tests = 0
    testspassed = 0
    testsfailed = 0
    failedtests = []

    conf_param_overrides = parse_params(conf_file)
    epsilon = conf_param_overrides.get('epsilon', DEFAULT_EPSILON)
    scaling_factor = conf_param_overrides.get('scaling_factor', DEFAULT_SCALINGFACTOR)

    base = os.path.splitext(os.path.basename(conf_file))[0]
    if not parallel:
        logging.info('Executing Test: ' + base)
    else:
        logging.info('Executing Parallel Test: ' + base)

    cmd = []
    if parallel:
        cmd.append('mpirun')
        cmd.append('-np')
        cmd.append('2')
    cmd.append(protomol_path)
    cmd.append(conf_file)

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode > 0:
        s = 'Not able to execute Protomol!\n'
        s += 'cmd: ' + str(cmd) + '\n'
        logging.critical(s)
    expects = []
    outputs = glob.glob('tests/output/' + base + '.*')
    outputtemp = []
    for output in outputs:
        outbase = os.path.basename(output)
        if os.path.exists('tests/expected/' + outbase):
            outputtemp.append(output)
            expects.append('tests/expected/' + outbase)

    outputs = outputtemp

    for i in xrange(len(outputs)):
        tests += 1
        ftype = os.path.splitext(os.path.basename(outputs[i]))[1]

        if ftype in ['.header', '.xtc']:
            continue

        ignoreSign = False

        # Ignore signs on eigenvectors
        if ftype == '.vec':
            ignoreSign = True

        logging.info('\tTesting: ' + expects[i] + ' ' + outputs[i])

        if ftype == ".dcd":
            if compare_dcd.compare_dcd(expects[i], outputs[i], epsilon, scaling_factor, ignoreSign):
                logging.info('\t\tPassed')
                testspassed += 1
            else:
                logging.warning('\t\tFailed')
                testsfailed += 1
                failedtests.append('Comparison of ' + expects[i] + ' and ' + outputs[i])
                if args.errorfailure:
                    sys.exit(1)
        else:
            if comparator.compare(expects[i], outputs[i], epsilon, scaling_factor, ignoreSign):
                logging.info('\t\tPassed')
                testspassed += 1
            else:
                logging.warning('\t\tFailed')
                testsfailed += 1
                failedtests.append('Comparison of ' + expects[i] + ' and ' + outputs[i])
                if args.errorfailure:
                    sys.exit(1)

    return (tests, testspassed, testsfailed, failedtests)
Example no. 15
def classMaker(sequence, myRCList):
      print "\n"
      print "Reading frame 1: "
      Rframe1 = rFrameClass.Rframe(sequence, 0, 3)
      comparator.compare(Rframe1.format())
      print "\n"
      print "Reading frame 2: "
      Rframe2 = rFrameClass.Rframe(sequence, 1, 4)
      comparator.compare(Rframe2.format())
      print "\n"
      print "Reading frame 3: "
      Rframe3 = rFrameClass.Rframe(sequence, 2, 5)
      comparator.compare(Rframe3.format())
      print "\n"
      print "Reading frame 4: "
      Rframe4 = rFrameClass.Rframe(myRCList, 0, 3)
      comparator.compare(Rframe4.format())
      print "\n"
      print "Reading frame 5: "
      Rframe5 = rFrameClass.Rframe(myRCList, 1, 4)
      comparator.compare(Rframe5.format())
      print "\n"
      print "Reading frame 6: "
      Rframe6 = rFrameClass.Rframe(myRCList, 2, 5)
      comparator.compare(Rframe6.format())
Example no. 16
 def test_exact_partial_match(self):
   """Tests that an array matches a different array that contains all
   of its elements in order."""
   self.assertTrue(comparator.compare(self.arr1, self.arr2,
                                      MFCC_MATCH_THRESHOLD))
Example no. 17
import input_parser
import comparator
import subprocess
import util
import sys

monpoly = ""
stream = ""
stream_config = ""

inp = sys.argv[1:]

param = input_parser.get_param(inp)
for tool, paths in param.items():
    if tool == 'monpoly':
        popen = subprocess.Popen(monpoly + " -s " + paths.get('signature') + " -f" + paths.get('formula') +
                                 " -l" + paths.get('log') + " > m_out", shell=True)
        popen.communicate()
        param.get('monpoly').update({'out': 'm_out'})
    elif tool == 'stream':
        s_param = paths.get('stream')
        util.stream_set_src(paths.get('script'), paths.get('logs'))
        dest = util.stream_get_dest(paths.get('script'))
        popen = subprocess.Popen(stream + " -c " + stream_config + " " + s_param.get('script'), shell=True)
        popen.communicate()
        param.get('stream').update({'out': dest})
comparator.compare(monpoly=param.get('monpoly').get('out'),
                   stream=param.get('stream').get('out'))

Example no. 18
    expects = filter(untestable, expects)
    outputs = filter(untestable, outputs)

    # Compare Outputs
    output_size = len(outputs)
    output_pass = []
    output_fail = []

    for i in xrange(output_size):
        ignoreSign = False
        ftype = os.path.splitext(os.path.basename(outputs[i]))[1]

        if ftype == '.vec':
            ignoreSign = True

        if comparator.compare(expects[i], outputs[i], epsilon, scaling_factor,
                              ignoreSign):
            string = outputs[i] + " matches"
            output_pass.append(string)
            print string
        else:
            string = outputs[i] + " differs"
            output_fail.append(string)
            print string

    print name + " " + str(output_size) + " " + str(len(output_pass))

    # Create XML
    if len(output_pass) == output_size:
        stats_pass += 1
        passed_tests.append({'id': testid, 'name': name})
    else:
Example no. 19
 def test_not_a_match(self):
   """Tests that arrays containing different elements, where neither is
   a slice of the other, do not match if their elements are numerically
   far apart."""
   self.assertFalse(comparator.compare(self.arr1, self.arr4,
                                       MFCC_MATCH_THRESHOLD))
Example no. 20
def run_test(protomol_path, conf_file, pwd, parallel):
    tests = 0
    testspassed = 0
    testsfailed = 0
    failedtests = []

    conf_param_overrides = parse_params(conf_file)
    epsilon = conf_param_overrides.get('epsilon', DEFAULT_EPSILON)
    scaling_factor = conf_param_overrides.get('scaling_factor',
                                              DEFAULT_SCALINGFACTOR)

    base = os.path.splitext(os.path.basename(conf_file))[0]
    if not parallel:
        logging.info('Executing Test: ' + base)
    else:
        logging.info('Executing Parallel Test: ' + base)

    cmd = []
    if parallel:
        cmd.append('mpirun')
        cmd.append('-np')
        cmd.append('2')
    cmd.append(protomol_path)
    cmd.append(conf_file)

    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode > 0:
        s = 'Not able to execute Protomol!\n'
        s += 'cmd: ' + str(cmd) + '\n'
        logging.critical(s)
    expects = []
    outputs = glob.glob('tests/output/' + base + '.*')
    outputtemp = []
    for output in outputs:
        outbase = os.path.basename(output)
        if os.path.exists('tests/expected/' + outbase):
            outputtemp.append(output)
            expects.append('tests/expected/' + outbase)

    outputs = outputtemp

    for i in xrange(len(outputs)):
        tests += 1
        ftype = os.path.splitext(os.path.basename(outputs[i]))[1]

        if ftype in ['.header', '.xtc']:
            continue

        ignoreSign = False

        # Ignore signs on eigenvectors
        if ftype == '.vec':
            ignoreSign = True

        logging.info('\tTesting: ' + expects[i] + ' ' + outputs[i])

        if ftype == ".dcd":
            if compare_dcd.compare_dcd(expects[i], outputs[i], epsilon,
                                       scaling_factor, ignoreSign):
                logging.info('\t\tPassed')
                testspassed += 1
            else:
                logging.warning('\t\tFailed')
                testsfailed += 1
                failedtests.append('Comparison of ' + expects[i] + ' and ' +
                                   outputs[i])
                if args.errorfailure:
                    sys.exit(1)
        else:
            if comparator.compare(expects[i], outputs[i], epsilon,
                                  scaling_factor, ignoreSign):
                logging.info('\t\tPassed')
                testspassed += 1
            else:
                logging.warning('\t\tFailed')
                testsfailed += 1
                failedtests.append('Comparison of ' + expects[i] + ' and ' +
                                   outputs[i])
                if args.errorfailure:
                    sys.exit(1)

    return (tests, testspassed, testsfailed, failedtests)
Example no. 21
 def test_a_eq_b(self):
     actual = compare(7, 7)
     self.assertEqual(0, actual)
Example no. 22
 def test_a_gt_b(self):
     actual = compare(7, 3)
     self.assertEqual(1, actual)
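The unit tests in examples 13, 21 and 22 together pin down a classic three-way comparator. A minimal sketch that satisfies all three assertions; the real implementation is not shown in these examples.

def compare(a, b):
    # -1 if a < b, 1 if a > b, 0 if they are equal.
    if a < b:
        return -1
    if a > b:
        return 1
    return 0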
Example no. 23
 def test_exact_match(self):
   """Tests that an array matches itself."""
   self.assertTrue(comparator.compare(self.arr1, self.arr1,
                                      MFCC_MATCH_THRESHOLD))
Example no. 24
if __name__ == "__main__":
    desc = "code_diff сравнивает попарно посслыки разных пользователей на похожесть и если пара" \
           " подозрительна " \
           "показывает в программе сравнения (по умолчанию ./kdiff3) " \
           "в папке с поссылками за контест файлы должны назваться runid-userid-problem.{language suffix}"
    parser = argparse.ArgumentParser(description=desc, prog="code_diff")
    parser.add_argument('contest_folder', help='путь к папке с поссылками')
    parser.add_argument('-d', '--diff', default="./kdiff3", help='путь к программе сравнения файлов(kdiff3, vimdiff, '
                                                                 'diff и т.д.)')
    parser.add_argument('-l', '--log', default="./code_diff.log",
                        help='путь к файлу, где сохраняются подозрительные программы')
    parser.add_argument('-m', '--mode', default='last', choices=['all', 'last'], help='режим работы; all - проверять все '
                                                                                      'поссылки; last - проверять только '
                                                                                      'последнюю поссылку пользователя по '
                                                                                      'каждой задаче')
    parser.add_argument('-w', '--without-problem', dest='without', action="append",  help='задача которую не нужно проверять, '
                                                                                          'можно использовать несколько раз, если таких '
                                                                                          'задач много')
    parser.add_argument('--version', action='version', version='%(prog)s 0.0')
    parser.add_argument('--max-common-len', '-mcl', type=float, dest='mcl', help='максимальное соотношение '
                                                                                 'при котором файлы считаются не '
                                                                                 'подозрительными')
    parser.add_argument('-q', '--quiet', action="store_true", help='если указан парметр, то все подозрительные просто '
                                                                   'будут сохранены в log, и не будет вызываться '
                                                                   'программа сравнения')
    args = parser.parse_args()

    log_file = open(args.log, "w")
    comparator.compare(args.contest_folder, args.diff, log_file, args.without, args.mcl, args.quiet, args.mode)
    log_file.close()
Example no. 25
#!/usr/bin/python

import comparator
import sys
import argparse
import logging

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='File comparison tool for floating point data' )
    parser.add_argument('original', help='Original file to compare against')
    parser.add_argument('new', help='New file to compare against')
    parser.add_argument('epsilon', type=float, help='Epsilon to compare files with')
    parser.add_argument('--scale', type=float, default=float(1), help='Scaling factor for New file values')
    parser.add_argument('--ignore-sign', action='store_true', default=False, help='Ignore sign differences between files')
    parser.add_argument('--verbose', '-v', action='store_true', default=False, help='Verbose output')

    args = parser.parse_args()

    level = logging.INFO
    if args.verbose:
        level = logging.DEBUG
    logging.basicConfig(level=level)

    files_same = comparator.compare(args.original, args.new, args.epsilon, args.scale, ignoreSign=args.ignore_sign)

    if not files_same:
        logging.warn('Files Differ')
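The argparse help above documents the comparator's interface: an epsilon tolerance, a scaling factor applied to the new file's values, and an optional sign-insensitive mode. A rough sketch of a compare function with that interface, assuming whitespace-separated numeric files; this is an illustration, not the comparator module used by these scripts.

def compare(original, new, epsilon, scale=1.0, ignoreSign=False):
    # Read both files as flat lists of floats, scaling the new file's values.
    with open(original) as f_orig, open(new) as f_new:
        orig_vals = [float(x) for x in f_orig.read().split()]
        new_vals = [float(x) * scale for x in f_new.read().split()]
    if len(orig_vals) != len(new_vals):
        return False
    for a, b in zip(orig_vals, new_vals):
        if ignoreSign:
            a, b = abs(a), abs(b)
        if abs(a - b) > epsilon:
            return False
    return True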
Example no. 26
    expects = filter(untestable, expects)
    outputs = filter(untestable, outputs)

    # Compare Outputs
    output_size = len(outputs)
    output_pass = []
    output_fail = []

    for i in xrange(output_size):
        ignoreSign = False
        ftype = os.path.splitext(os.path.basename(outputs[i]))[1]

        if ftype == ".vec":
            ignoreSign = True

        if comparator.compare(expects[i], outputs[i], epsilon, scaling_factor, ignoreSign):
            string = outputs[i] + " matches"
            output_pass.append(string)
            print string
        else:
            string = outputs[i] + " differs"
            output_fail.append(string)
            print string

    print name + " " + str(output_size) + " " + str(len(output_pass))

    # Create XML
    if len(output_pass) == output_size:
        stats_pass += 1
        xml_test = ET.SubElement(xml_pass, "Test")
        xml_test.set("id", str(testid))
Example no. 27
 def run(self):
     compare(self.contest_path, self.diff_program, self.log_file,
             self.without_problems, self.mcl, self.is_quiet, self.mode)
Example no. 28
                        type=float,
                        help='Epsilon to compare files with')
    parser.add_argument('--scale',
                        type=float,
                        default=float(1),
                        help='Scaling factor for New file values')
    parser.add_argument('--ignore-sign',
                        action='store_true',
                        default=False,
                        help='Ignore sign differences between files')
    parser.add_argument('--verbose',
                        '-v',
                        action='store_true',
                        default=False,
                        help='Verbose output')

    args = parser.parse_args()

    level = logging.INFO
    if args.verbose:
        level = logging.DEBUG

    files_same = comparator.compare(args.original,
                                    args.new,
                                    args.epsilon,
                                    args.scale,
                                    ignoreSign=args.ignore_sign)

    if not files_same:
        logging.warn("Files Differ")
Example no. 29
 def run(self):
     compare(self.contest_path, self.diff_program, self.log_file, self.without_problems, self.mcl, self.is_quiet,
             self.mode)