Example #1
    def run(self, options):
        config = Config()
        config.keep_results = options['keep_results']
        config.create_diffs = options['create_diffs']
        config.update_refs = options['update_refs']

        t = Timer()
        docs = options['tests']
        docs_dir = options['docs_dir']

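        # Resolve the documents directory: a single directory argument selects every
        # test under it, a single file falls back to its parent directory, and
        # multiple files use their longest common path prefix.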
        if len(docs) == 1:
            if os.path.isdir(docs[0]):
                if docs_dir is None:
                    docs_dir = docs[0]
                if docs_dir == docs[0]:
                    docs = []
            else:
                if docs_dir is None:
                    docs_dir = os.path.dirname(docs[0])
        else:
            if docs_dir is None:
                docs_dir = os.path.commonprefix(docs).rpartition(os.path.sep)[0]

        tests = TestRun(docs_dir, options['refs_dir'], options['out_dir'])
        status = tests.run_tests(docs)
        tests.summary()
        get_printer().printout_ln("Tests run in %s" % (t.elapsed_str()))

        return status
Example #2
    def run(self):
        bisect = GitBisect(self.config.src_dir)
        # TODO: save revision in .md5 files and get the good
        # revision from refs when not provided by command line
        try:
            bisect.start(self.config.bad, self.config.good)
        except Exception:
            print("Couldn't start git bisect")
            return
        finished = False
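        # Bisect loop: rebuild the sources at each candidate revision and rerun the
        # single test, marking the revision bad when the test fails and good when it
        # passes, until git bisect converges on the offending commit.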
        while not finished:
            test_runner = TestRun(os.path.dirname(self._test), self._refsdir,
                                  self._outdir)
            try:
                self._builder.build()
            except Exception:
                print(
                    "Impossible to find regression, build is broken in revision: %s"
                    % (self.__get_current_revision()))
                break
            test_runner.run_test(os.path.basename(self._test))
            if test_runner._n_passed == 0:
                finished = bisect.bad()
            else:
                finished = bisect.good()

        bisect.reset()
Example #3
    def run(self, options):
        config = Config()
        config.keep_results = options['keep_results']
        config.create_diffs = options['create_diffs']

        t = Timer()
        doc = options['tests']
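        # A directory argument runs the whole suite under it; a file argument runs
        # only that single test.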
        if os.path.isdir(doc):
            docs_dir = doc
        else:
            docs_dir = os.path.dirname(doc)

        tests = TestRun(docs_dir, options['refs_dir'], options['out_dir'])
        if doc == docs_dir:
            tests.run_tests()
        else:
            tests.run_test(os.path.basename(doc))
        tests.summary()
        print("Tests run in %s" % (t.elapsed_str()))
Example #4
    def __init__(self,
                 files,
                 listofreaders=[],
                 testsetname='',
                 solufilename=''):
        self.files = files[0:len(files)]
        assert self.__listelementsdiffer(self.files)
        self.testruns = []
        self.readers = []
        self.datakeys = []
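        # Create one TestRun per log file; each run's settings label is taken from
        # the second-to-last dot-separated component of the file name.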
        for filename in files:
            testrun = TestRun(filename, solufilename, testsetname)
            self.testruns.append(testrun)
            testrun.settings = filename.split('.')[-2]

        self.datacollector = DataCollector()
        self.datacollector.registerListOfReaders(listofreaders)
        if solufilename != '':
            #        print 'solufiledatacollector initialized for solufilename:', solufilename
            self.solufiledatacollector = SoluFileDataCollector()

        self.readers = listofreaders
        for reader in self.readers:
            self.addDataKey(reader.datakey)
Example #5
        results.append(
            testrun.run(epoch_num=EPOCH_NUM,
                        batch_size=BATCH_SIZE,
                        activation=activation,
                        learning_rate=LEARNING_RATE,
                        hidden_layer_size=HIDDEN_LAYER_SIZE,
                        sigma=SIGMA,
                        mu=MU,
                        run_num=RUN_NUMBER))
    return results


if __name__ == '__main__':
    file = load_file("mnist.pkl")
    train, valid, test = file
    test_run = TestRun(file)

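    # Sweep batch size, weights range, and hidden layer size in turn, pickling each
    # sweep's results to its own file.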
    batch_results = test_batch_size(test_run, [2, 50, 250, 5000, 10000, 50000])
    batch_output = open('batch.pkl', 'wb')
    pkl.dump(batch_results, batch_output)
    batch_output.close()

    weight_results = test_weights_range(test_run, [0.1, 0.5, 1, 2, 5, 10])
    weight_output = open('weight.pkl', 'wb')
    pkl.dump(weight_results, weight_output)
    weight_output.close()

    layers_results = test_hidden_layer_size(test_run,
                                            [5, 25, 50, 100, 200, 1000])
    layers_output = open('layers.pkl', 'wb')
    pkl.dump(layers_results, layers_output)
    layers_output.close()
Example #6
import commands
import subprocess
import string
import os
import sys
# add the dir containing the testharness script to the library search path
calling_dir = os.path.dirname(sys.argv[0])
sys.path.append(calling_dir)
from TestRun import TestRun
from TestSuite import TestSuite
from TestCase import TestCase
from EnvVar import EnvVar

#####
# GLOBAL VARS
#####
debug = 0
test_run = TestRun()
is_running_on_win = 0
win_file_prefix = "C:/cygwin"
final_log_location = ""
sleep_interval_secs = 1
default_output_dir = "/tmp/testharness/output"
default_summary_file = "testharness_summary.out"
default_test_suite_dir = calling_dir + "/../../../tests"
testharness_front_end_host = "www.ctbg.acer.com"
harness_return_code = 0
kill_signal = 9
print_alive_msg = 0
alive_msg_interval = 30
expected_statuses = ['PASS', 'EXPECTED_TO_FAIL']

#####
Example #7
# Import Automation
from TestRun import ConfigRun
from TestRun import TestRun

# Start Test
run = TestRun()

url = 'https://m.tokopedia.com/'
browser = 'Firefox'

# Configure Browser
run.config_browser(browser, url)

# Open m.tokopedia.com (run.browser is assumed to be the driver configured above)
run.browser.open_url(url)

koplakmenu = run.browser.get_element('//koplak')

categorymenu = run.browser.get_element(
    '//div[@class="pdp-shop__action-holder"]//span[contains(@class, "favorite__btn-fav")]'
)
categorymenu.click()

run.execute_step('pause')

# Close Browser
run.browser.close_browser()