Example #1
    def run(self):
        bisect = GitBisect(self.config.src_dir)
        # TODO: save revision in .md5 files and get the good
        # revision from refs when not provided by command line
        try:
            bisect.start(self.config.bad, self.config.good)
        except:
            print("Couldn't start git bisect")
            return
        finished = False
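        # Bisect until git reports the first bad revision: a failing test marks
        # the current revision bad, a passing test marks it good.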
        while not finished:
            test_runner = TestRun(os.path.dirname(self._test), self._refsdir,
                                  self._outdir)
            try:
                self._builder.build()
            except:
                print(
                    "Impossible to find regression, build is broken in revision: %s"
                    % (self.__get_current_revision()))
                break
            test_runner.run_test(os.path.basename(self._test))
            if test_runner._n_passed == 0:
                finished = bisect.bad()
            else:
                finished = bisect.good()

        bisect.reset()
Example #2
    def run(self, options):
        config = Config()
        config.keep_results = options['keep_results']
        config.create_diffs = options['create_diffs']
        config.update_refs = options['update_refs']

        t = Timer()
        docs = options['tests']
        docs_dir = options['docs_dir']

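        # Work out the docs directory: a single directory argument means run
        # every test in it; otherwise derive it from the given test file(s).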
        if len(docs) == 1:
            if os.path.isdir(docs[0]):
                if docs_dir is None:
                    docs_dir = docs[0]
                if docs_dir == docs[0]:
                    docs = []
            else:
                if docs_dir is None:
                    docs_dir = os.path.dirname(docs[0])
        else:
            if docs_dir is None:
                docs_dir = os.path.commonprefix(docs).rpartition(os.path.sep)[0]

        tests = TestRun(docs_dir, options['refs_dir'], options['out_dir'])
        status = tests.run_tests(docs)
        tests.summary()
        get_printer().printout_ln("Tests run in %s" % (t.elapsed_str()))

        return status
Example #3
    def __init__(self,
                 minders,
                 packageName,
                 closeDownKey,
                 closeDownKeyMaker,
                 mode,
                 devRun,
                 mailer,
                 logger,
                 statusText=''):
        # the summaryCallBack object passed in is the TesterXMLConverter.

        statusText = 'initialising'
        TestRun.__init__(self, minders, packageName, closeDownKey,
                         closeDownKeyMaker, logger, statusText)

        self.mode = mode
        self.devRun = devRun
        self.mailer = mailer
Example #4
    def run(self, options):
        config = Config()
        config.keep_results = options['keep_results']
        config.create_diffs = options['create_diffs']
        config.update_refs = options['update_refs']

        t = Timer()
        doc = options['tests']
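        # A directory argument runs the whole suite; a single file runs only that test.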
        if os.path.isdir(doc):
            docs_dir = doc
        else:
            docs_dir = os.path.dirname(doc)

        tests = TestRun(docs_dir, options['refs_dir'], options['out_dir'])
        if doc == docs_dir:
            status = tests.run_tests()
        else:
            status = tests.run_test(os.path.basename(doc))
        tests.summary()
        get_printer().printout_ln("Tests run in %s" % (t.elapsed_str()))

        return status
Example #5
def main(args):
    """The main entry point of the application

    """
    # Parse arguments and check if they exist
    parsed_arguments = config.parse_arguments(args)

    if not config.validate_input(parsed_arguments):
        print('Invalid command line arguments')
        sys.exit(0)

    config.setup_logging(
        default_level=int(parsed_arguments['level'])
    )

    logger.debug('Parsing env variables')
    env.read_envfile(parsed_arguments['env'])

    logger.info('Initializing TestRun object')
    test_run = TestRun()

    logger.info('Parsing XML file - %s', parsed_arguments['xml'])
    test_run.update_from_xml(parsed_arguments['xml'])

    logger.info('Parsing log file - %s', parsed_arguments['log'])
    test_run.update_from_ica(parsed_arguments['log'])

    if parsed_arguments['kvp']:
        logger.info('Getting KVP values from VM')
        test_run.update_from_vm([
            'OSBuildNumber', 'OSName', 'OSMajorVersion'
        ], stop_vm=True)

    # Parse values to be inserted
    logger.info('Parsing test run for database insertion')
    insert_values = test_run.parse_for_db_insertion()

    # Connect to db and insert values in the table
    logger.info('Initializing database connection')
    db_connection, db_cursor = sql_utils.init_connection()

    logger.info('Executing insertion commands')
    for table_line in insert_values:
        sql_utils.insert_values(db_cursor, table_line)

    logger.info('Committing changes to the database')
    db_connection.commit()
Example #6
    def run(self):
        bisect = GitBisect(self.config.src_dir)
        # TODO: save revision in .md5 files and get the good
        # revision from refs when not provided by command line
        try:
            bisect.start(self.config.bad, self.config.good)
        except:
            print("Couldn't start git bisect")
            return
        finished = False
        while not finished:
            test_runner = TestRun(os.path.dirname(self._test), self._refsdir, self._outdir)
            try:
                self._builder.build()
            except:
                print("Impossible to find regression, build is broken in revision: %s" % (self.__get_current_revision()))
                break
            test_runner.run_test(os.path.basename(self._test))
            if test_runner._n_passed == 0:
                finished = bisect.bad()
            else:
                finished = bisect.good()

        bisect.reset()
Example #7
    def __init__(self,
                 files,
                 listofreaders=[],
                 testsetname='',
                 solufilename=''):
        self.files = files[0:len(files)]
        assert self.__listelementsdiffer(self.files)
        self.testruns = []
        self.readers = []
        self.datakeys = []
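        # Build one TestRun per log file; the settings name comes from the
        # next-to-last dot-separated component of the file name.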
        for filename in files:
            testrun = TestRun(filename, solufilename, testsetname)
            self.testruns.append(testrun)
            testrun.settings = filename.split('.')[-2]

        self.datacollector = DataCollector()
        self.datacollector.registerListOfReaders(listofreaders)
        if solufilename != '':
            #        print 'solufiledatacollector initialized for solufilename:', solufilename
            self.solufiledatacollector = SoluFileDataCollector()

        self.readers = listofreaders
        for reader in self.readers:
            self.addDataKey(reader.datakey)
Example #8
    def run(self, options):
        config = Config()
        config.keep_results = options['keep_results']
        config.create_diffs = options['create_diffs']

        t = Timer()
        doc = options['tests']
        if os.path.isdir(doc):
            docs_dir = doc
        else:
            docs_dir = os.path.dirname(doc)

        tests = TestRun(docs_dir, options['refs_dir'], options['out_dir'])
        if doc == docs_dir:
            tests.run_tests()
        else:
            tests.run_test(os.path.basename(doc))
        tests.summary()
        print("Tests run in %s" % (t.elapsed_str()))
Example #9
    def run(self, options):
        config = Config()
        config.keep_results = options["keep_results"]
        config.create_diffs = options["create_diffs"]
        config.update_refs = options["update_refs"]

        t = Timer()
        doc = options["tests"]
        if os.path.isdir(doc):
            docs_dir = doc
        else:
            docs_dir = os.path.dirname(doc)

        tests = TestRun(docs_dir, options["refs_dir"], options["out_dir"])
        if doc == docs_dir:
            tests.run_tests()
        else:
            tests.run_test(os.path.basename(doc))
        tests.summary()
        print("Tests run in %s" % (t.elapsed_str()))
Example #10
import os
import sys
import subprocess
import string
# add the dir containing the testharness script to the library search path
calling_dir = os.path.dirname(sys.argv[0])
sys.path.append(calling_dir)
from TestRun import TestRun
from TestSuite import TestSuite
from TestCase import TestCase
from EnvVar import EnvVar


#####
# GLOBAL VARS
#####
debug = 0
test_run = TestRun()
is_running_on_win = 0
win_file_prefix = "C:/cygwin"
final_log_location = ""
sleep_interval_secs = 1
default_output_dir = "/tmp/testharness/output"
default_summary_file = "testharness_summary.out"
default_test_suite_dir = calling_dir + "/../../../tests"
testharness_front_end_host = "www.ctbg.acer.com"
harness_return_code = 0
kill_signal = 9
print_alive_msg = 0
alive_msg_interval = 30
expected_statuses = ['PASS', 'EXPECTED_TO_FAIL']

Example #11
        results.append(
            testrun.run(epoch_num=EPOCH_NUM,
                        batch_size=BATCH_SIZE,
                        activation=activation,
                        learning_rate=LEARNING_RATE,
                        hidden_layer_size=HIDDEN_LAYER_SIZE,
                        sigma=SIGMA,
                        mu=MU,
                        run_num=RUN_NUMBER))
    return results


if __name__ == '__main__':
    file = load_file("mnist.pkl")
    train, valid, test = file
    test_run = TestRun(file)

    batch_results = test_batch_size(test_run, [2, 50, 250, 5000, 10000, 50000])
    batch_output = open('batch.pkl', 'wb')
    pkl.dump(batch_results, batch_output)
    batch_output.close()

    weight_results = test_weights_range(test_run, [0.1, 0.5, 1, 2, 5, 10])
    weight_output = open('weight.pkl', 'wb')
    pkl.dump(weight_results, weight_output)
    weight_output.close()

    layers_results = test_hidden_layer_size(test_run,
                                            [5, 25, 50, 100, 200, 1000])
    layers_output = open('layers.pkl', 'wb')
    pkl.dump(layers_results, layers_output)
Example #12
import commands
import os
import sys
import subprocess
import string
# add the dir containing the testharness script to the library search path
calling_dir = os.path.dirname(sys.argv[0])
sys.path.append(calling_dir)
from TestRun import TestRun
from TestSuite import TestSuite
from TestCase import TestCase
from EnvVar import EnvVar

#####
# GLOBAL VARS
#####
debug = 0
test_run = TestRun()
is_running_on_win = 0
win_file_prefix = "C:/cygwin"
final_log_location = ""
sleep_interval_secs = 1
default_output_dir = "/tmp/testharness/output"
default_summary_file = "testharness_summary.out"
default_test_suite_dir = calling_dir + "/../../../tests"
testharness_front_end_host = "www.ctbg.acer.com"
harness_return_code = 0
kill_signal = 9
print_alive_msg = 0
alive_msg_interval = 30
expected_statuses = ['PASS', 'EXPECTED_TO_FAIL']

#####
Example #13
# Import Automation
from TestRun import ConfigRun
from TestRun import TestRun

# Start Test
run = TestRun()

url = 'https://m.tokopedia.com/'
browser = 'Firefox'

# Configure Browser
run.config_browser('Firefox', url)

# Open m.tokopedia.com

auto.browser.open_url(url)

koplakmenu = browser.get_element('//koplak')

categorymenu = auto.browser.get_element(
    '//div[@class="pdp-shop__action-holder"]//span[contains(@class, "favorite__btn-fav")]'
)
categorymenu.click()

run.execute_step('pause', )

# Close Browser
browser.close_browser()
Example #14
    def __init__(self, packageName, closedownKey, rttRunStartTime,
                 closeDownKeyMaker, logger, failText, vetoFlag):
        # pass minders as an empty list
        TestRun.__init__(self, [], packageName, closedownKey,
                         closeDownKeyMaker, logger, failText)