Code Example #1
File: TestCase.py  Project: icejoywoo/some_demos
    def run(self, result):
        result.testStarted()               # record that this test began on the shared result
        self.setUp()
        method = getattr(self, self.name)  # look up the test method by name
        method()
        self.tearDown()
        return result                      # return the shared result rather than a fresh TestResult
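The run() method above only does useful work if the caller passes in a TestResult that keeps track of how many tests have started. A minimal, self-contained sketch of that calling convention, using a hypothetical counting TestResult and MyTest class (neither is taken from the project above):

# Minimal sketch, not the project's code: a counting TestResult and a trivial
# test case driven through run() in the same way as the snippet above.
class TestResult(object):
    def __init__(self):
        self.run_count = 0

    def testStarted(self):
        # called once per test that begins running
        self.run_count += 1


class MyTest(object):
    def __init__(self, name):
        self.name = name

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_something(self):
        assert 1 + 1 == 2

    def run(self, result):
        result.testStarted()
        self.setUp()
        getattr(self, self.name)()
        self.tearDown()
        return result


result = TestResult()
MyTest("test_something").run(result)
print("%d test(s) run" % result.run_count)  # 1 test(s) run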
Code Example #2
def test_approx_nn(method, traindata, testdata, m, alpha):
    avg_distance = 0
    if method == "hashing":
        #train
        lsh = LocalitySensitiveHash(traindata, D=1000, m=m)
        #time test
        t0 = time.time()
        for testdoc_id, testdoc in testdata.iteritems():
            avg_distance += lsh.nearest_neighbor(testdoc,
                                                 depth=HW2_DEPTH).distance
    elif method == "kdtree":
        #train
        kdt = KDTree(D)
        for i, document in traindata.iteritems():
            key = make_dense(document)
            kdt.insert(key, i)
        #time test
        t0 = time.time()
        for _, testdoc in testdata.iteritems():
            key = make_dense(testdoc)
            neighbor = kdt.nearest(key, alpha)
            avg_distance += EvalUtil.distance(testdoc, traindata[neighbor])

    #finish timing, report results
    mean_time = (time.time() - t0) / len(testdata)
    mean_distance = avg_distance / len(testdata)
    return TestResult(method,
                      m=m,
                      D=D,
                      alpha=alpha,
                      avg_time=mean_time,
                      avg_distance=mean_distance)
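This example and Example #7 below construct TestResult from keyword arguments (method, m, D, alpha, avg_time, avg_distance). A container compatible with that call pattern could be as simple as the sketch below; the field names come from the calls, everything else is an assumption:

# Hypothetical TestResult container compatible with the keyword arguments used
# in test_approx_nn above (not the project's actual definition).
class TestResult(object):
    def __init__(self, method, m=None, D=None, alpha=None,
                 avg_time=None, avg_distance=None):
        self.method = method              # "hashing", "kdtree", "rpkdt", ...
        self.m = m                        # projection / hash dimension
        self.D = D                        # original dimensionality
        self.alpha = alpha                # approximation parameter
        self.avg_time = avg_time          # mean query time per test document
        self.avg_distance = avg_distance  # mean distance to the returned neighbor

    def __repr__(self):
        return ("TestResult(method=%r, m=%r, D=%r, alpha=%r, "
                "avg_time=%r, avg_distance=%r)" % (
                    self.method, self.m, self.D, self.alpha,
                    self.avg_time, self.avg_distance))

# Placeholder numbers purely for illustration:
r = TestResult(method="kdtree", m=20, D=1000, alpha=1.0,
               avg_time=0.004, avg_distance=0.72)
print(r)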
Code Example #3
def test_kd_tree(n, D, n_test, alphas):
    """
    Tests the query time and distance for a random data set and test set
    @param n: int - the number of points of the dataset
    @param D: int - the dimension of the data points
    @param n_test: int - the number of points to test
    @param alphas: [float] - a set of alphas to test
    @return: [TestResult] - list of TestResult objects, each holding the average time and distance for a single query
    """
    documents = RandomData.random_dataset(n, DOCDIM)
    test_documents = RandomData.random_dataset(n_test, DOCDIM)

    rand_tree = KDTree(DOCDIM)
    for i, document in documents.iteritems():
        key = [document.get(idx) for idx in xrange(0, DOCDIM)]
        rand_tree.insert(key, i)

    times = []
    for alpha in alphas:
        start_time = time.clock()
        cum_dist = 0.0
        for i, test_document in test_documents.iteritems():
            key = [test_document.get(idx) for idx in xrange(0, DOCDIM)]
            doc_id = rand_tree.nearest(key, alpha)
            cum_dist += EvalUtil.distance(test_document, documents[doc_id])
        duration = time.clock() - start_time
        times.append(
            TestResult("KDTree", n, DOCDIM, alpha, duration / n_test,
                       cum_dist / n_test))
    return times
Code Example #4
    def on_test_executable_finished(self, executable_output):
        self.executables_count += 1
        self.entire_output += executable_output + "\n"

        tests_group_name = self._get_tests_group_name(executable_output)

        while self._has_tests_results(executable_output):
            test_result_message = self._get_next_text_result_message(
                executable_output)

            if "FAIL" in test_result_message:
                self.fails.append(
                    TestResult(tests_group_name, True, test_result_message))
            else:
                self.passes.append(
                    TestResult(tests_group_name, False, test_result_message))
            executable_output = executable_output.replace(
                test_result_message, "")
Code Example #5
    def newTest(self, name, tid=None, local=False, projectId=1):
        """
        Create a new test result widget
        Emit the signal "addTestTab"

        @param name: name of the test
        @type name: string

        @param tid: test id
        @type tid: int

        @param local: whether the test is local
        @type local: boolean

        @param projectId: project id
        @type projectId: int
        """
        wtr = TestResult.WTestResult(name, tid, self.parent, local, projectId)
        self.tests.update({self.testId: wtr})
        return wtr
Code Example #6
    def test(self, boundary):
        test_result = None
        # Instead of recursion, we iterate up to `boundary` times, calling `advance`
        # to perform the next step of Algorithm 1 in the paper.
        for i in xrange(0, boundary):
            test_result = self.advance()

            if test_result is not None:
                # We have a decision by DT. Skip the remaining iterations and return the result.
                return test_result

            # Test result is inconclusive, so continue.

        if test_result is None:
            # If the result is still inconclusive, we pick option 1 of DT, letting the test pass.
            test_result = TestResult(TestResult.PASSED, self.history)

        return test_result
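This example and Example #13 below build TestResult objects from a verdict constant plus the recorded history, optionally with a counterexample. A minimal definition consistent with those calls might look like the following sketch (the constants and constructor shape are inferred from the calls; everything else is an assumption):

# Hypothetical verdict-style TestResult matching the calls
# TestResult(TestResult.PASSED, history) and
# TestResult(TestResult.FAILED, history, counterexample);
# not the project's actual definition.
class TestResult(object):
    PASSED = "passed"
    FAILED = "failed"

    def __init__(self, verdict, history, counterexample=None):
        self.verdict = verdict                # TestResult.PASSED or TestResult.FAILED
        self.history = list(history)          # trace of inputs/outputs observed so far
        self.counterexample = counterexample  # only set when the verdict is FAILED

    def passed(self):
        return self.verdict == self.PASSED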
Code Example #7
def test_rptree(traindata, testdata, projdim):
    avg_distance = 0

    #train, start timer, test
    rptree = GaussianRandomProjection(traindata, D=DOCDIM, m=projdim)
    t0 = time.time()
    for _, testdoc in testdata.iteritems():
        neighbor = rptree.nearest_neighbor(testdoc, alpha=1)
        avg_distance += EvalUtil.distance(testdoc,
                                          rptree.documents[neighbor.doc_id])

    #finish timing, report results
    mean_time = (time.time() - t0) / len(testdata)
    mean_distance = avg_distance / len(testdata)
    return TestResult(method="rpkdt",
                      m=projdim,
                      D=DOCDIM,
                      alpha=1,
                      avg_time=mean_time,
                      avg_distance=mean_distance)
Code Example #8
File: project_access.py  Project: KDE/calligra
#!/usr/bin/env kross
# -*- coding: utf-8 -*-

import traceback
import Kross
import Plan
import TestResult


TestResult.setResult(True)
asserttext = "Test of property '{0}' failed:\n   Expected: '{2}'\n     Result: '{1}'"
asserttext2 = "Failed to set property '{0}' to '{1}'. Result: {2}"

try:
    project = Plan.project()
    assert project is not None
    
    property = 'Name'
    data = "Project name"
    before = project.name()
    Plan.beginCommand("Set data")
    res = project.setData(project, property, data)
    text = asserttext2.format(property, data, res)
    assert res == 'Success', text
    result = project.name()
    text = asserttext.format(property, result, data)
    assert result == data, text
    Plan.revertCommand()
    result = project.name()
    text = asserttext.format(property, result, before)
    assert result == before, text
Code Example #9
#!/usr/bin/env kross
# -*- coding: utf-8 -*-

import traceback
import Kross
import Plan
import TestResult

TestResult.setResult(True)
asserttext = "Test of property '{0}' failed:\n   Expected: '{2}'\n     Result: '{1}'"
asserttext2 = "Failed to set property '{0}' to '{1}'. Result: {2}"

try:
    project = Plan.project()
    assert project is not None

    account = project.createAccount(0)
    assert account is not None, "Could not create account"

    property = 'Name'
    data = "Account name"
    before = account.name()
    Plan.beginCommand("Set data")
    res = project.setData(account, property, data)
    text = asserttext2.format(property, data, res)
    assert res == 'Success', text
    result = account.name()
    text = asserttext.format(property, result, data)
    assert result == data, text
    Plan.revertCommand()
    result = account.name()
Code Example #10
outfile = options.outfile
verbose = options.verbose

if verbose:
    print('%s:' % prog)
    print('  report file:  %s' % res_file)
    print('  output file:  %s' % outfile)
    print('  out_type:     %s' % out_type)
    if out_type == 'r':
        print('  platform:     %s' % platform)
        print('  config:       %s' % config)

# ----------------------------------------------------------------------
#  Main process.
#
res = TestResult(res_file, scratch=False, verbose=0)
if out_type == 'r':
    lines = res.edit_result_log(platform, config)
    for line in lines:
        print(line)
    if outfile:
        fio = TextFio(outfile, 'w', encoding='cp932')
        if fio.open() < 0:
            Error(prog).abort('can\'t open "%s"' % outfile)
        fio.writelines(lines)
        fio.close()

if out_type == 'd':
    res.dump()

sys.exit(0)
Code Example #11
    #
    # Look for each expected result on the filesystem and
    # assign a status to each result for later reports.
    # This walks the expected results from oldest to latest.
    # For each result it checks each model, and for each model it checks each test.
    # When it reaches a specific expected result, it calls get_result_data to find
    # it on the filesystem. Each successfully retrieved result is returned
    # in a TestResult object, which also includes the path to the file
    # where the result was found.
    #
    for my_datetime in result_datetime_list:
        for my_forecast_group_name in forecast_group_name_list:
            for my_model_name in model_name_list:
                for my_test_name in test_name_list:
                    test_result = TestResult.TestResult()
                    test_result.forecast_group_name = my_forecast_group_name
                    test_result.status = ResultStatus.INITIAL
                    test_result.softwareVersion = __version__
                    test_result.test_name = my_test_name
                    test_result.model_name = my_model_name
                    test_result.resultDateTime = my_datetime
                    test_result.processingDateTime = "%4d-%02d-%02d" % (
                        curdate.year, curdate.month, curdate.day)
                    test_result = ResultFinder.get_result_data(
                        test_result, config, my_model_name, my_test_name,
                        my_datetime)
                    fg_results.append(test_result)

    #
    # One or more reports can be generated about this format group using
Code Example #12
    def _makeResult(self):
        if DEBUG:
            sys.stdout.write("Test runner making result...\n")
        return TestResult.TestResult(self.stream)
Code Example #13
    def advance(self):
        # pick one of the three options of the algorithm
        option = self.test_case_selection.get_next_option(self.history)

        if option == TestCaseSelection.OPTION_PASS:
            # the test is passed
            return TestResult(TestResult.PASSED, self.history)

        elif option == TestCaseSelection.OPTION_INPUT:
            # get the next input from test case selection
            next_input = self.test_case_selection.get_next_input(self.history)

            # pass input to SUT and check if SUT provided an output in the meantime
            system_output = self.system_under_test.pass_input(next_input)

            if system_output is not None:
                # the input is cancelled, because the output generated by the SUT must be processed first.
                # the new output may influence test case selection

                # we add the output to the history
                self.history.extend(system_output)

                # Check if the output is accepted (this is the case, if no counter example can be found)
                counterexample = self.output_verifier.check_output(
                    self.history)

                if counterexample is not None:
                    # the test failed, because we received an invalid output
                    return TestResult(TestResult.FAILED, self.history,
                                      counterexample)

            else:  # system_output is None
                # the input has been accepted, so we add it to the history
                self.history.extend(next_input)

                # return None to indicate that the test is neither passed nor failed
                return None

        elif option == TestCaseSelection.OPTION_OUTPUT:
            # we wait for the next output or quiescence
            system_output = self.system_under_test.receive_output()

            if system_output is None:
                # we observe quiescence

                # we add the quiescence to the history
                self.history.extend(Output(None))

                # Check if quiescence is accepted (this is the case, if no counter example can be found)
                counterexample = self.output_verifier.check_quiescence(
                    self.history)

                if counterexample is not None:
                    # the test failed, because quiescence is not allowed here
                    return TestResult(TestResult.FAILED, self.history,
                                      counterexample)

            else:
                # we add the verified output to the history
                self.history.extend(system_output)

                # Check if the output is accepted (this is the case, if no counter example can be found)
                counterexample = self.output_verifier.check_output(
                    self.history)

                if counterexample is not None:
                    # the test failed, because we received an invalid output
                    return TestResult(TestResult.FAILED, self.history,
                                      counterexample)

            # return None to indicate that the test is neither passed nor failed
            return None

        else:
            raise Exception('Invalid option!')
Code Example #14
File: SpringheadTest.py  Project: java311/Springhead
# move to test directory
cwd = os.getcwd()
topdir = '%s/%s' % (src_path, Util.upath(test_dir))
os.chdir(Util.pathconv(topdir))

# preparations
head_dir = spr_path.abspath('inc')
tmpl_dir = '%s/bin' % spr_path.abspath('test')
csc_head = '%s/UseClosedSrcOrNot.h' % head_dir
use_tmpl = '%s/UseClosedSrc.h.template' % tmpl_dir
unuse_tmpl = '%s/UnuseClosedSrc.h.template' % tmpl_dir

if scratch:
    print('scratch result file')
res = TestResult(res_file, scratch, verbose=1)
csc = ClosedSrcControl(csc_head, use_tmpl, unuse_tmpl, dry_run, verbose)

# traverse start
trv = Traverse(testid, res, csc, ctl_file, section, toolset, platforms,
               configs, csusage, rebuild, timeout, report, audit, dry_run,
               verbose)
signal.signal(signal.SIGINT, KEYINTR.handler)
stat = trv.traverse(top)
res.finish()
csc.revive()

# back to start directory and make "result.log".
os.chdir(cwd)
cmnd = 'python GenResultLog.py'
outf = '-o ../log/result.log'