def main(): """comparison_test script entry point""" parser = argparse.ArgumentParser( description='vcdMaker tools comparison testing utility') parser.add_argument('--exec', '-e', required=True, help="Path to the vcdMaker executable") parser.add_argument('--testdir', '-t', required=True, help="Path to the directory containing tests") parser.add_argument('--verbose', '-v', action='store_true', default=False, help="Turns on verbose output") args = parser.parse_args() check_arguments(args) tests = Tests(args.testdir) executor = Executor(args.exec, tests.get_tests(), args.verbose) result, number_failed, number_total = executor.run() if not result: print('TEST PASSED ({})'.format(number_total)) exit_result = 0 else: print('TEST FAILED ({}/{})'.format(number_failed, number_total)) exit_result = 1 sys.exit(exit_result)
def start_tests(self):
    test_num = 1
    for test in self.__test_list:
        logger = get_logger(test_num)
        list_flows = []
        test_obj = Tests(self.__w3, logger, self.__inspector)
        try:
            func_args = test["args"]
            if test_obj.is_thread(test["func"]):
                list_flows = []
                flows = test["flows"]
                if flows > len(self.accounts):
                    flows = len(self.accounts)
                for i in range(flows):
                    j = i + 1
                    if j == len(self.accounts):
                        j = 0
                    accounts = (self.accounts[i][0], self.accounts[i][1],
                                self.accounts[j][0], self.accounts[j][1])
                    func_args.insert(0, accounts)
                    list_flows.append(
                        Thread(target=test_obj.start_test,
                               args=(test["func"], func_args.copy())))
                    func_args.pop(0)
            else:
                list_flows.append(
                    Thread(target=test_obj.start_test,
                           args=(test["func"], func_args.copy())))
            logger.info("Start test {0}(flows: {3}): {1}{2}".format(
                test_num, test["func"], test["args"], len(list_flows)))
            for flow in list_flows:
                flow.start()
            while True:
                list_alive = [
                    state.is_alive() for state in list_flows
                    if state.is_alive()
                ]
                if not list_alive:
                    break
        except TypeError as e:
            logger.error("\tUnhandled error in starting {2}:{0}{1}".format(
                e.__class__.__name__, e, test["func"]))
            continue
        finally:
            test_num += 1
            sleep(5)  # There are cases when transactions in same tests do not have time to process on the node
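# The while-loop above busy-waits on Thread.is_alive(). An equivalent and more idiomatic
# way to wait for all flows to finish is to join them; a minimal, self-contained sketch
# (assuming no per-thread timeout handling is needed):
from threading import Thread
import time

flows = [Thread(target=time.sleep, args=(0.1,)) for _ in range(3)]
for flow in flows:
    flow.start()
for flow in flows:
    flow.join()   # blocks until the thread finishes, no polling needed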
def handle_mrregs(self, mr):
    if not self.factory.requester:
        if self.factory.verbosity:
            print('Ignoring mrregs - not requester')
        return
    for v in mr:
        req = v[0]
        if req['__class__'] != 'req_MR_REG':
            print('Error: expected req_MR_REG, got {}'.format(
                req['__class__']))
            continue
        access = req['__value__']['access']
        sz = req['__value__']['len']
        rsp = v[1]
        if rsp['__class__'] != 'rsp_MR_REG':
            print('Error: expected rsp_MR_REG, got {}'.format(
                rsp['__class__']))
            continue
        rsp_zaddr = rsp['__value__']['rsp_zaddr']
        put_get_remote = MR.PUT_REMOTE | MR.GET_REMOTE
        test_remote = ((access & put_get_remote) == put_get_remote)
        if self.factory.load_store:
            access |= MR.REQ_CPU
        print('mr: rsp_zaddr={:#x}, sz={:#x}, access={:#x}'.format(
            rsp_zaddr, sz, access))
        if test_remote:
            conn = self.factory.conn
            rmr = conn.do_RMR_IMPORT(self.remote_nodeid, rsp_zaddr, sz, access)
            pg_sz = 1 << rmr.pg_ps
            mask = (-pg_sz) & ((1 << 64) - 1)
            mmsz = (sz + (pg_sz - 1)) & mask
            pg_off = rmr.req_addr & ~mask
            if self.factory.load_store:
                rmm = mmap.mmap(conn.fno, mmsz, offset=rmr.offset)
            else:
                rmm = None
            t = Tests(self.factory.lmr, self.factory.lmm, rmr, sz, rmm,
                      self.factory.xdm, self.factory.verbosity,
                      self.factory.load_store, self.factory.physaddr)
            t.all_tests()
        else:
            print('skipping tests because mr not remote put/get')
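# The mask arithmetic above rounds the region size up to a whole number of registration
# pages. A worked example, assuming a 4 KiB page (pg_ps = 12) and a region of 0x1801
# bytes (hypothetical values, just to illustrate the bit math):
pg_ps = 12
sz = 0x1801
pg_sz = 1 << pg_ps                      # 0x1000
mask = (-pg_sz) & ((1 << 64) - 1)       # 0xFFFFFFFFFFFFF000, clears the low 12 bits
mmsz = (sz + (pg_sz - 1)) & mask        # 0x2000: size rounded up to two pages
print(hex(mmsz))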
def build(self):
    sm = ScreenManager()
    sm.add_widget(Login(name='login'))
    sm.add_widget(MainMenu(name='main_menu'))
    sm.add_widget(Profile(name='profile'))
    sm.add_widget(Results(name='results'))
    sm.add_widget(Tests(name='tests'))
    return sm
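# build() above is the standard Kivy App hook. A minimal sketch of how such an app is
# wired up and launched, assuming each widget (Login, MainMenu, ...) is a Screen
# subclass; the app class name and the stand-in screen below are hypothetical:
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen

class Tests(Screen):
    pass   # stand-in for the real screen classes

class TestsApp(App):
    def build(self):
        sm = ScreenManager()
        sm.add_widget(Tests(name='tests'))
        return sm

if __name__ == '__main__':
    TestsApp().run()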
class Exp:
    def __init__(self):
        self.t = Tests()

    def start_first(self):
        numbers = self.t.execute_first()
        x = range(1, len(numbers) + 1)
        plt.figure()
        plt.plot(x, numbers)
        plt.xlabel('Graph size')
        plt.ylabel('Number of strongly connected components')
        plt.grid()

    def start_second(self):
        numbers = self.t.execute_second()
        x = np.arange(0.0, 1.1, 0.1)
        plt.figure()
        plt.plot(x, numbers)
        plt.xlabel('Edge probability')
        plt.ylabel('Number of strongly connected components')
        plt.grid()

    def start_third(self):
        grandezze = self.t.execute_third()
        x = range(1, len(grandezze) + 1)
        plt.figure()
        plt.plot(x, grandezze)
        plt.xlabel('Graph size')
        plt.ylabel('Maximum size of the strongly connected components')
        plt.grid()

    def start_fourth(self):
        tempi = self.t.execute_fourth()
        x = range(1, len(tempi) + 1)
        plt.figure()
        plt.plot(x, tempi)
        plt.xlabel('Graph size')
        plt.ylabel('Execution time (seconds)')
        plt.grid()
def main():
    urlBase = '127.0.0.1'
    port = 8080
    url = None
    for testing in ROUTINE:
        verb = testing.get('verb')
        pathUri = testing.get('path')
        method = testing.get('method')
        headers = testing.get('headers')
        body = testing.get('body')
        test = Tests(urlBase=urlBase, port=port, pathUri=pathUri,
                     method=method, headers=headers, body=body, verb=verb)
        response = test.call()
        #print(response.code)
        #print(response.headers)
        #print(response.reason)
        #print(response.read().decode('utf-8'))
        # self.dictionary = self.response.read().decode('utf-8')
        # return json.loads(self.dictionary)
        if (url == None or url != pathUri + method):
            url = pathUri + method
            print(Color.SUCCESS_BLUE + url + Color.NORMAL)
        if (response.code == testing.get('return_code')):
            print(Color.SUCCESS_GREEN + '[OK] ' + str(testing.get('return_code')) +
                  ' >> ' + testing.get('description') + Color.NORMAL)
        else:
            print(Color.FAIL + '[FAIL] ' + str(testing.get('return_code')) +
                  ' >> ' + testing.get('description') + Color.NORMAL)
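# ROUTINE is not shown in this snippet, but the keys read above imply a list of dicts.
# A hypothetical entry, assuming the shape suggested by the .get() calls:
ROUTINE = [
    {
        'verb': 'GET',
        'path': '/api/',
        'method': 'users',
        'headers': {'Content-Type': 'application/json'},
        'body': None,
        'return_code': 200,
        'description': 'list users',
    },
]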
def main():
    py.init()
    screen = py.display.set_mode((WIDTH, HEIGHT))
    py.display.set_caption(TITLE)
    cc = (0, 0, 0)
    exit = False
    FPS = py.time.Clock()
    dt = 0
    tests = Tests()
    while not exit:
        for event in py.event.get():
            if event.type == py.QUIT:
                exit = True
        screen.fill(cc)
        tests.update(dt)
        tests.render(screen)
        py.display.update()
        dt = FPS.tick(60)
    py.quit()
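# The loop above only assumes that Tests exposes update(dt) and render(surface). A
# minimal stand-in, purely illustrative (the real class is not part of this snippet):
import pygame as py

class Tests:
    def __init__(self):
        self.x = 0

    def update(self, dt):
        # dt is the last frame time in milliseconds (Clock.tick return value)
        self.x += 0.1 * dt

    def render(self, surface):
        py.draw.circle(surface, (255, 255, 255), (int(self.x) % 640, 240), 10)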
def get_available_tests(self):
    test = Tests(self)
    return test.avaliable_tests()['response']
class Menu:
    def __init__(self):
        self.read_data = ReadData()
        self.naive = Naive()
        self.branch_and_bound = Branch_And_Bound()
        self.tests = Tests()
        self.data = []
        self.choice = 0
        self.starting_city = 0

    def main_menu(self):
        while self.choice != 6:
            print('_________MENU_________')
            print('1. Choose data')
            print('2. Display data')
            print('3. Brute force')
            print('4. Branch and bound')
            print('5. Tests')
            print('6. Exit')
            self.choice = input('Choice: ')
            if self.choice == '1':
                self.data = self.read_data.get_data()
            elif self.choice == '2':
                if self.data == [] or self.data == None:
                    print('No data to display.')
                else:
                    print('Data: ')
                    for graph in self.data:
                        print('')
                        for row in graph:
                            print(row)
            elif self.choice == '3':
                for graph in self.data:
                    start = datetime.datetime.now()
                    solution, path = self.naive.naive_algorithm(
                        graph, self.starting_city)
                    duration = datetime.datetime.now() - start
                    print('Duration = ', duration)
                    print('Solution = ', solution)
                    print('Path = ', path)
            elif self.choice == '4':
                for graph in self.data:
                    start = datetime.datetime.now()
                    solution, path = self.branch_and_bound.find_solution(
                        graph, self.starting_city)
                    duration = datetime.datetime.now() - start
                    print('Duration = ', duration)
                    print('Solution = ', solution)
                    print('Path = ', path)
            elif self.choice == '5':
                print('1. Brute force: ')
                print('2. Branch and bound: ')
                choice = int(input('Choice: '))
                if choice == 1:
                    self.tests.testing_naive()
                if choice == 2:
                    self.tests.testing_b_b()
            elif self.choice == '6':
                exit()
            else:
                print('Enter a valid number')
from flask.ext.script import Manager
from flask import current_app

from tests import Tests
from routes import Routes

sub_manager = Manager(current_app)

sub_manager.add_command('test', Tests())
sub_manager.add_command('routes', Routes())
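# The flask.ext.* namespace was removed in Flask 1.0; with current Flask-Script the same
# commands are registered through the package's own import path. A minimal sketch,
# assuming Tests and Routes are unchanged:
from flask_script import Manager
from flask import current_app
from tests import Tests
from routes import Routes

sub_manager = Manager(current_app)
sub_manager.add_command('test', Tests())
sub_manager.add_command('routes', Routes())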
def run_tests():
    tests = Tests()
    tests.test_correlation_id()
def test(self, ex, name, *args, **kwds):
    name = 'Example %s %s' % (ex, name)
    Tests.test(self, name, *args, **kwds)
def main():
    bootstrapper = Bootstrapper()
    bootstrapper.initialize()
    tests = Tests()
    tests.run()
"""Модуль проверки KavPyLibs""" __version__ = '1.0.0' from KavPyLibs.kav_logging import KavLog from tests import Tests # Основной ход выполнения программы print('KavPyLibs ver.'+__version__) # Получение логгера (с выводом на экран и в файл) logger = KavLog.get_logger(filename='test.log', log_level=KavLog.nameToLevel["DEBUG"]) # Методы проверки logger.debug('test') Tests.test_file_size(logger)
def __init__(self):
    self.t = Tests()
'''
    Tibre Diana Andreea, group 917
'''
from operations import Operations
from conversions import Conversions
from service import Service
from ui import UI
from tests import Tests
from validations import Validation

operations = Operations()
conversions = Conversions()
validations = Validation()
service = Service(operations, conversions, validations)
ui = UI(service)

tests = Tests()
tests.run()
ui.run()
class Test(View):
    '''
    A view for running test cases.
    '''
    NAME = viewName()
    _UI_FILE = "test_view.ui"
    _ICON_FILE = ":/test/icons/system-run.png"

    _checkOnConnect = settings.get(NAME, "options", "check_on_connect",
                                   default="Yes", force=True)

    # Menus and Tool bar
    _menuFile = ("actionAdd", "actionRemove", None)
    _menuEdit = ("actionExpand", "actionExpandAll",
                 "actionCollapse", "actionCollapseAll", None)
    _menuView = ("actionRefresh", None)
    _toolBar = ("actionAdd", "actionRemove", None,
                ("actionExpand", "actionExpandAll"),
                ("actionCollapse", "actionCollapseAll"),
                "actionRefresh", None,
                "actionStart", "actionPause", "actionResume", "actionStop")

    def __init__(self, parent):
        View.__init__(self, parent)
        self._devices = TestDeviceList(self._elements["treeWidgetDevices"])
        self._actionStart = self._elements["actionStart"]
        self._actionStop = self._elements["actionStop"]
        self._actionPause = self._elements["actionPause"]
        self._actionResume = self._elements["actionResume"]
        self._actionStart.triggered.connect(self._startTests)
        self._actionStop.triggered.connect(self._stopTests)
        self._actionPause.triggered.connect(self._pauseTests)
        self._actionResume.triggered.connect(self._resumeTests)
        self._actionStart.setVisible(True)
        self._actionStop.setVisible(False)
        self._actionPause.setVisible(False)
        self._actionResume.setVisible(False)
        # Summary channel
        channels.add("SummaryChannel", "_ui_summary")
        # Progress channel
        pBar = QtGui.QProgressBar()
        pBar.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
        font = pBar.font()
        font.setBold(True)
        pBar.setFont(font)
        self._parent.getStatusBar().addPermanentWidget(pBar, 1)
        self._progress = ProgressChannelHelper(pBar)
        channels.add(ProgressChannel, "_ui_progress", progress=self._progress)
        self._progress.testStarted.connect(self._onTestStarted)
        self._progress.testStopped.connect(self._onTestStopped)
        self._progress.stopped.connect(self._onStopped)
        self._tests = Tests(self._elements["treeWidgetLocations"],
                            self._elements["treeWidgetTests"],
                            self._elements["treeWidgetModels"])
        self._elements["actionAdd"].triggered.connect(self._tests.addLocation)
        self._elements["actionRemove"].triggered.connect(
            self._tests.removeLocation)
        self._elements["actionExpand"].triggered.connect(
            self._tests.expandSelected)
        self._elements["actionExpandAll"].triggered.connect(
            self._tests.expandAll)
        self._elements["actionCollapse"].triggered.connect(
            self._tests.collapseSelected)
        self._elements["actionCollapseAll"].triggered.connect(
            self._tests.collapseAll)
        self._elements["actionRefresh"].triggered.connect(self._tests.refresh)
        # Initialize private test variables
        self._suiteRuns = 0
        self._todoSuites = 0
        self._testResult = None
        self._testRunner = None

    # Public methods:
    def saveState(self):
        '''
        Saves the view's state to configuration.
        '''
        View.saveState(self)
        self._tests.saveState()

    def loadState(self):
        '''
        Loads the view's state from configuration.
        '''
        View.loadState(self)
        self._tests.loadState()

    # Slots:
    #@QtCore.Slot(Device)
    def _deviceConnected(self, device):
        '''
        Adds a device to list.
        '''
        self._devices.add(device, check=self._checkOnConnect.getBool())

    #@QtCore.Slot(Device)
    def _deviceDisconnected(self, device, error):
        '''
        Removes given device from list. The error parameter can be set to True
        to indicate that the device was disconnected due to an error.
        '''
        self._devices.remove(device)

    #@QtCore.Slot()
    def _startTests(self):
        '''
        Starts execution of tests.
        '''
        log.debug("Starting tests")
        self._actionStart.setVisible(False)
        devices = self._devices.getChecked()
        if not devices:
            runWarning("Select some devices first")
            self._actionStart.setVisible(True)
            return
        tests = self._tests.getCheckedTests()
        if not tests:
            self._actionStart.setVisible(True)
            return
        if sum([test.count() for test in tests]) == 0:
            runWarning("Selected test suites do not contain any test cases")
            self._actionStart.setVisible(True)
            return
        self._suiteRuns = 0
        self._todoSuites = len(tests)
        self._testResult = testresult.TestResult()
        self._testRunner = TestRunner(devices, tests, self._testResult)
        self._devices.deviceChecked.connect(self._testRunner.addDevice)
        self._devices.deviceUnchecked.connect(self._testRunner.removeDevice)
        self._devices.setWarning(True)
        self._testRunner.start()
        self._actionStop.setVisible(True)
        self._actionPause.setVisible(True)

    #@QtCore.Slot()
    def _stopTests(self):
        '''
        Stops execution of tests.
        '''
        log.debug("Stopping tests")
        self._actionStart.setVisible(True)
        self._actionStop.setVisible(False)
        self._actionPause.setVisible(False)
        self._actionResume.setVisible(False)
        self._testRunner.stop()

    #@QtCore.Slot()
    def _pauseTests(self):
        '''
        Pauses execution of tests.
        '''
        log.debug("Pausing tests")
        self._actionStart.setVisible(False)
        self._actionStop.setVisible(True)
        self._actionPause.setVisible(False)
        self._actionResume.setVisible(True)
        self._testRunner.pause()

    #@QtCore.Slot()
    def _resumeTests(self):
        '''
        Resumes execution of tests.
        '''
        log.debug("Resuming tests")
        self._actionStart.setVisible(False)
        self._actionStop.setVisible(True)
        self._actionPause.setVisible(True)
        self._actionResume.setVisible(False)
        self._testRunner.resume()

    #@QtCore.Slot(testresult.TestResultBase, testresult.DeviceExecResult)
    def _onTestStarted(self, result, device):
        '''
        Handles a start test execution of a test represented by the given result.
        '''
        if isinstance(result, testresult.TestCaseResult):
            log.debug("Began execution of test case: %s" % result.id)
        # If it is a top-level test suite result then increase the counter of
        # running top-level test suites
        if result.parent is None:
            self._suiteRuns += 1

    #@QtCore.Slot(testresult.TestResultBase, testresult.DeviceExecResult)
    def _onTestStopped(self, result, device):
        '''
        Handles a stop test execution of a test represented by the given result.
        '''
        if isinstance(result, testresult.TestCaseResult):
            log.debug("Finished execution of test case: %s" % result.id)
        # If it is a top-level test suite result then decrease the counters of
        # running top-level test suites and to do test suites.
        if result.parent is None:
            self._suiteRuns -= 1
            self._todoSuites -= 1
            # If all top-level test suites are done then join() the test runner
            if self._suiteRuns == 0 and self._todoSuites <= 0:
                self._testRunner.join()

    #@QtCore.Slot()
    def _onStopped(self):
        '''
        Shows summary dialog after finishing test executions.
        '''
        log.debug("All tests finished")
        self._actionStart.setVisible(True)
        self._actionStop.setVisible(False)
        self._actionPause.setVisible(False)
        self._actionResume.setVisible(False)
        self._devices.deviceChecked.disconnect(self._testRunner.addDevice)
        self._devices.deviceUnchecked.disconnect(self._testRunner.removeDevice)
        self._devices.setWarning(False)
        files = []
        for c in self._testRunner.result.get():
            if isinstance(c, channels.TestResultFileChannel) and c.isActive():
                files.append((c.name, c.filePath()))
        dialog = ReportDialog(
            self._testResult.get(name='_ui_summary')[0].getSummary(),
            files, len(self._devices.getChecked()) > 0)
        dialog.closed.connect(self._progress.reset,
                              type=QtCore.Qt.DirectConnection)
        dialog.runAgainClicked.connect(self._startTests,
                                       type=QtCore.Qt.QueuedConnection)
        dialog.showDetailsClicked.connect(self._showDetails)
        dialog.run()

    #@QtCore.Slot()
    def _showDetails(self):
        '''
        Shows execution result in Result view.
        '''
        resultView = self._parent.getView("result")
        if resultView is not None:
            log.debug("Showing details in Result view")
            resultView.activate()
            resultView.showLastResult()
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

# -------------------------------------------------------------------------
#  MAIN
# -------------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    sys.path.append('src/')
    from tests import Tests

    #
    # VARIABLES
    #
    tests = Tests()

    #
    # CODE
    #
    # for each appropriate chunk of the original code we need one test to ensure it is working
    args = ['./br-make_tests.py', "environments/", "test3", "log/",
            "measurements/"]
    # args = sys.argv

    tests.bank__initialize_standard_bank(args)
    tests.bank__update_maturity(args)
    # tests.bank__update_risk_aversion(args)  # tricky one, doesnt work yet
    # tests.bank__get_interest(args)
    # tests.bank__liquidate_due_transactions(args)
from bar import Bar
from interfazBaseDeDatos import InterfazBaseDeDatos
from interfazDeUsuario import InterfazDeUsuario
from interfazMaps import InterfazMaps
from listaBares import ListaDeBares
from tests import Tests

db2 = "file.txt"
test1 = Tests(db2)
"""test1.testAgregar()
test1.testDarDeAlta()
test1.testBuscarBaresCercanos()"""
test1.testOrdenarBaresPor()
def run_fufu(self):
    os.system("cls")
    self.result_table = []
    ''' get ssh connection and reset eth0 ip address '''
    self.utils = Utils(self)
    if self.utils.ssh is None:
        self.result_table.append(['Connection to the system', 'FAIL'])
        print("Can't established ssh connection")
        raw_input('Press Enter for continue...')
        self.menu()
    else:
        self.result_table.append(['Connection to the system', 'PASS'])
        tests = Tests(self, self.utils)
        if not tests.check_bands():
            self.menu()
        print('Enable Remote and Modem Communication: {}'.format(
            self.utils.set_remote_communication(1)))
        ''' save set files '''
        self.utils.send_command('udp_bridge1', 'start')
        storm = Storm(self)
        for place, band in enumerate(self.utils.get_bands()):
            storm.save_setfile(place=place, band=band)
        self.result_table.append(['Save set file for IDOBR', 'PASS'])
        self.utils.send_command('udp_bridge1', 'stop')
        self.utils.set_filters(1)
        tests.verify_connections()
        ''' test power '''
        tests.test_composite_power()
        ''' test bands status '''
        tests.test_band_status()
        ''' test sw and patch version '''
        tests.test_swv()
        ''' Set date and time'''
        tests.set_dateTime()
        ''' TTF calibration '''
        tests.ttf_calibrate()
        ''' Band mute test '''
        tests.mute_test()
        ''' test alarm '''
        tests.test_ext_alarm()
        ''' gps/gpr test '''
        tests.gpr_gps_test()
        ''' clear log '''
        tests.clear_log()
        self.utils.print_table(['Description', 'Status'], self.result_table)
        self.utils.ssh.close()
        raw_input('Press Enter for continue...')
        self.menu()
def tests(*args, **kwargs):
    cam = kwargs['ctx']['cam']
    test = Tests(cam)
    return jsonify(test.avaliable_tests())
from src.utils import Utils
from src.view import View
from src.model import Model
from src.controller import Controller
from tests import Tests

if __name__ == '__main__':
    if Utils.check_testmode():
        tests = Tests()
        tests.start()
    else:
        controller = Controller()
        model = Model(controller=controller)
        view = View(controller=controller)
        controller.start(model=model, view=view)
def run_test(*args, **kwargs):
    cam = kwargs['ctx']['cam']
    test_type = kwargs['test_type']
    method_name = kwargs['method_name']
    test = Tests(cam)
    return jsonify(test.service_test(test_type, method_name))
""" #------------------------------------------------------------------------- # # MAIN # #------------------------------------------------------------------------- if __name__ == '__main__': import sys sys.path.append('../src/') from tests import Tests # # VARIABLES # tests = Tests() args = ['./do_tests.py', "environments/", "test1", "log/", "measurements/"] # # CODE: CALL TESTS # # # AGENT # #tests.agent__sync() #tests.agent__compute_decision('test1') # # NETWORK #
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

#-------------------------------------------------------------------------
#  MAIN
#-------------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    sys.path.append('src/')
    from tests import Tests

    #
    # VARIABLES
    #
    tests = Tests()

    #
    # CODE
    #
    # for each appropriate chunk of the original code we need one test to ensure it is working
    #args=['./netfimas.py', "test_environments/", "test3", "log/"]
    #tests.network__do_interbank_trades(args)
    #tests.network__remove_inactive_bank(args)
    #tests.updater__updater1(args)
    #tests.updater__liquidate_assets(args)
    #tests.updater__updater2(args)

    args = ['./netfimas.py', "test_environments/", "test20", "log/", "measurements/"]
    args = sys.argv
    tests.test_fire_sales(args)
def start_test_four(t):
    r = t.exec_fourth()
    parole, res = r[0], r[1]
    print parole, res
    plt.figure()
    plt.plot([2, 3, 4], res)
    plt.title('number of words found as the n-gram size varies')
    plt.xlabel('n-grams')
    plt.ylabel('number of words found')
    plt.legend(parole)
    plt.grid()


def start_test_five(t):
    t.exec_fifth()


if __name__ == '__main__':
    t = Tests()
    start_test_one(t)
    start_test_two(t)
    start_test_three(t)
    start_test_four(t)
    start_test_five(t)
    plt.show()
with cd('results'):
    if not os.path.exists('data'):
        os.makedirs('data')
    with cd('data'):
        if not os.path.exists('Experiment-' + str(experiment_number)):
            os.makedirs('Experiment-' + str(experiment_number))

logger = setup_logger('results/data/Experiment-' + str(experiment_number),
                      "__main__", "main")
logger.info("###################################RUNNING EXPERIMENT NUM %s#########################",
            str(experiment_number))
logger.info("Program Arguments:")
args_dict = vars(args)
for key, value in args_dict.iteritems():
    logger.info("%s=%s" % (str(key), str(value)))

test_suite = Tests(logger, args)
target_test, Y_pred, cost_list, cost_test_list, learning_rates, rmse = test_suite.run_tests()

Y_pred_copy = np.copy(Y_pred)
accuracy_score_Y_pred = np.rint(Y_pred_copy).astype(int)

if args.test_type != 'f':
    logger.info('###################################Accuracy Results###############################')
    logger.info('Accuracy: ' + str(accuracy_score(target_test, accuracy_score_Y_pred)))
    logger.info('\n' + str(classification_report(target_test, accuracy_score_Y_pred)))
else:
    logger.info('###################################Accuracy Results###############################')
    target_test_1d = target_test.ravel()
    Y_pred_1d = Y_pred.ravel()
    distance = 0
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

#-------------------------------------------------------------------------
#  MAIN
#-------------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    sys.path.append('src/')
    from tests import Tests

    #
    # VARIABLES
    #
    tests = Tests()

    #
    # CODE
    #
    # for each appropriate chunk of the original code we need one test to ensure it is working
    args = ['./br-make_tests.py', "environments/", "test3", "log/", "measurements/"]
    #args = sys.argv

    #tests.bank__initialize_standard_bank(args)
    #tests.bank__update_maturity(args)
    #tests.bank__update_risk_aversion(args) #tricky one, doesnt work yet
    #tests.bank__get_interest(args)
    #tests.bank__liquidate_due_transactions(args)
    #tests.bank__get_new_deposits(args)
    #tests.bank__transfer_required_deposits(args)
from src.utils import Utils
from src.view import View
from src.model import Model
from src.controller import Controller
from tests import Tests

if __name__ == '__main__':
    if Utils.verificar_modo_teste():
        tests = Tests()
        tests.iniciar()
    else:
        controller = Controller()
        view = View(controller=controller)
        model = Model(controller=controller)
        controller.segundo_init(model=model, view=view)
        view.segundo_init()
        model.segundo_init()
        controller.iniciar()
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

#-------------------------------------------------------------------------
#  MAIN
#-------------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    sys.path.append('src/')
    from tests import Tests

    #
    # VARIABLES
    #
    tests = Tests()

    #
    # CODE
    #
    # for each appropriate chunk of the original code we need one test to ensure it is working
    args = ['./br-make_tests.py', "environments/", "test3", "log/",
            "measurements/"]
    #args = sys.argv

    #tests.bank__get_parameters_from_file(args) #tricky one, havent tried yet
    #tests.bank__apply_sifi_surcharge(args) #tricky one, havent tried yet
    #tests.bank__update_maturity(args)
    #tests.bank__update_risk_aversion(args) #tricky one, doesnt work yet, havent changed anything
    #tests.bank__get_interest(args)
if args.cuda:
    model_target.cuda()
    model_source.cuda()
    discriminator_model.cuda()

# target_optimizer_encoder_params = [{'params': model_target.fc1.parameters()}, {'params': model_target.fc2.parameters()}]
target_optimizer = optim.Adam(model_target.parameters(), lr=args.lr)
# target_optimizer_encoder = optim.Adam(target_optimizer_encoder_params, lr=args.lr)
source_optimizer = optim.Adam(model_source.parameters(), lr=args.lr)
d_optimizer = optim.Adam(discriminator_model.parameters(), lr=args.lr)
criterion = nn.BCELoss()

if args.source == 'mnist':
    tests = Tests(model_source, model_target, classifyMNIST, 'mnist',
                  'fashionMnist', args, graph)
elif args.source == 'fashionMnist':
    tests = Tests(model_source, model_target, classifyMNIST, 'fashionMnist',
                  'mnist', args, graph)
else:
    raise Exception('args.source does not defined')


def reset_grads():
    model_target.zero_grad()
    model_source.zero_grad()
    discriminator_model.zero_grad()


def gen(model, input_data, optimizer, loss, batch):
    reset_grads()
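# nn.BCELoss expects probabilities in [0, 1], so the discriminator referenced above is
# assumed to end in a sigmoid. A minimal, self-contained sketch of how such a criterion
# is typically applied to real/fake batches (illustrative only, not this project's loop):
import torch
import torch.nn as nn

criterion = nn.BCELoss()
d_real = torch.sigmoid(torch.randn(8, 1))                  # stand-in discriminator outputs
d_fake = torch.sigmoid(torch.randn(8, 1))
loss_real = criterion(d_real, torch.ones_like(d_real))     # real samples -> label 1
loss_fake = criterion(d_fake, torch.zeros_like(d_fake))    # generated samples -> label 0
d_loss = loss_real + loss_fake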