def run(parsed_args):
    if parsed_args.test:
        print('Running tests...')
        run_tests()
    else:
        print('Processing inputs...')
        process_inputs(parsed_args)
def main():
    args = init_args()
    # Unpack variables from args so later changes don't have to touch args everywhere.
    user = args.user
    serial_number = args.s
    file_path = args.f
    jpeg_quality = args.q
    downsample = args.d
    test_number = args.test
    run_type = args.type  # renamed from `type`, which shadowed the built-in
    if test_number != -1:
        test.run_tests(user, serial_number)
    else:
        if run_type == "r":  # receiver
            utils.print_msg("Receiver!", DEBUG)
            receiver_main(user, serial_number, file_path)
        elif run_type == "t":  # transmitter
            utils.print_msg("Transmitter!", DEBUG)
            transmitter_main(user, serial_number, file_path, jpeg_quality, downsample)
def run(self): """ Execute the command. """ if run_tests() != 0: raise RuntimeError("test suite failed") return
def testing_menu(self):
    print("Tests:")
    print("1. Single run of the algorithm")
    print("2. Repeated runs of the algorithm")
    print("3. Visualization of the maps used in the tests")
    print("R - Back")
    choice = input()
    if choice == "1":
        test.single_run_test()
    elif choice == "2":
        test.run_tests(100)
    elif choice == "3":
        test.maps_test()
    elif choice in ("R", "r"):  # was `choice == "R" or "r"`, which is always truthy
        self.main_menu()
    else:
        self.testing_menu()
def parse_args():
    '''
    Parse the arguments.

    Not using the Python argparse library? No: it does not seem to support
    simple inner groups in an intuitive way, e.g. (-t | -r REPO_PATH [GENERATION_MAX])
    gets turned into (-t -r REPO_PATH [GENERATION_MAX]), or complex hackery
    needs to be done with subparsers.
    '''
    try:
        # Run test suite
        if sys.argv[1] == "-t":
            sys.path.insert(0, "../tests/")
            from test import run_tests
            print("Running Tests")
            print("=============")
            run_tests()
        elif sys.argv[1] == "-r":
            repo_path = sys.argv[2]
            try:
                gen_max = int(sys.argv[3])
            except (IndexError, ValueError):  # missing or non-integer argument
                gen_max = DEFAULT_GEN_SIZE
            run_simulation(repo_path, gen_max)
    except IndexError:
        logger.error(traceback.format_exc())
        print_usage()
    except Exception as e:
        logger.error(traceback.format_exc())
        print(e)
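# For comparison, a minimal sketch (an assumption, not the project's code) of how
# close argparse gets with a mutually exclusive group: it can enforce (-t | -r
# REPO_PATH), but the optional GENERATION_MAX positional cannot be tied to -r
# alone, which is the limitation the docstring above describes.
import argparse

def parse_args_with_argparse():
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-t', action='store_true', help='run the test suite')
    group.add_argument('-r', metavar='REPO_PATH', help='run a simulation on REPO_PATH')
    # This positional is accepted globally, not only alongside -r.
    parser.add_argument('gen_max', nargs='?', type=int, default=DEFAULT_GEN_SIZE,
                        help='maximum generation count (only meaningful with -r)')
    return parser.parse_args()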
def test_results():
    return jsonify(run_tests())
#!/usr/bin/env python
# encoding: utf-8
"""
run_tests.py

Created by Scott on 2012-12-26.
Copyright (c) 2012 Scott Rice. All rights reserved.
"""

import sys, os, inspect

from test import run_tests

# The code below is to allow test cases to import the class they are testing by
# using the syntax 'import ice.******'.
#
# This code was taken from a StackOverflow answer by sorin. Thanks bud!
# http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
#
# Get a reference to the current directory, without using __file__, which fails
# in certain situations based on how you call the script in Windows
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
ice_folder = os.path.join(cmd_folder, "ice")
if cmd_folder not in sys.path:
    sys.path.insert(0, cmd_folder)
if ice_folder not in sys.path:
    sys.path.insert(1, ice_folder)

run_tests()
def main():
    test.run_tests()
import test

test.run_tests()
import sys
from getopt import getopt

import models
import test

# Accept an optional -m MODEL_NAME flag; fall back to None when absent.
opts = getopt(sys.argv[1:], 'm:')[0]
model_name = opts[0][1] if opts else None
test.run_tests(model_name)
def parse_args():
    # Reconstructed: the snippet began mid-way through the first add_argument
    # call, so the parser setup and the '--seed' long option are inferred from
    # the surrounding code (args.seed is read below).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--seed', '-s', type=int, default=False,
        help='seed for the random number generator (default: new seed at every run)')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='show coloured graphs during calculation')
    parser.add_argument(
        '--path', '-p', default='test_cases.yml',
        help='path to file describing test cases in YAML format (default: ./test_cases.yml). '
             'See test_cases.yml for an example file.')
    args = parser.parse_args()
    seed = args.seed
    if not seed:
        seed = None
    return args.path, args.verbose, seed


if __name__ == '__main__':
    path, verbose, seed = parse_args()
    results = run_tests(path, verbose=verbose, seed=seed)
    print(results)
    results.to_csv(f'results/{path}.csv', index=False)
def run_module_tests():
    test.run_tests([test_echo, test_add])
#!/usr/bin/env python """Runs all unit tests with coverage analysis.""" import test if __name__ == '__main__': test.run_tests(with_coverage=True)
def train(model):
    print("Model done")
    # x_train = []
    # y_train = []

    # preprocessing_function:
    # function that will be applied to each input. The function runs after the
    # image is resized and augmented. It should take one argument (one image as
    # a Numpy tensor with rank 3) and output a Numpy tensor with the same shape.

    # We create two instances with the same arguments.
    data_gen_args = dict(
        preprocessing_function=random_crop,
        # rescale=1. / 255,
        # featurewise_center=True,
        # featurewise_std_normalization=True,
        horizontal_flip=True,
        vertical_flip=True,
        validation_split=0.1)
    x_image_gen = ImageDataGenerator(**data_gen_args)
    y_image_gen = ImageDataGenerator(**data_gen_args)

    print("Before Img Gen FIT")
    # Provide the same seed and keyword arguments to the fit and flow methods.
    seed = 1
    # Compute quantities required for featurewise normalization (std, center).
    # x_image_gen.fit(x_train, augment=True, seed=seed)  # TODO: x_train NEEDS to be 4-dimensional
    # y_image_gen.fit(y_train, augment=True, seed=seed)

    x_gen = x_image_gen.flow_from_directory(
        'pictures/keras_test',
        target_size=(img_width // scale_fact, img_width // scale_fact),
        batch_size=1,
        class_mode=None,  # TODO: could be "input"
        save_to_dir="pictures/keras_test/training/training",
        # save_prefix="t0_",
        subset="training",
        interpolation="lanczos",
        seed=seed)
    y_gen = y_image_gen.flow_from_directory(
        'pictures/keras_test',
        target_size=(img_width, img_width),
        batch_size=1,
        class_mode=None,  # TODO: was None
        save_to_dir="pictures/keras_test/training/validation",
        # save_prefix="t0_",
        subset="training",
        interpolation="lanczos",
        seed=seed)

    print("Before Zip")
    # Combine the generators into one that yields x and y together.
    train_generator = itertools.zip_longest(x_gen, y_gen)

    optimizer = Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)
    model.compile(optimizer=optimizer, loss='mean_squared_error')

    print("Before fit_generator")
    model.fit_generator(
        train_generator,
        verbose=2,
        steps_per_epoch=12,  # equal to (number of samples in the dataset) // (batch size)
        epochs=6,
        callbacks=get_callbacks())

    run_tests(model)
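# The generators above assume a `random_crop` helper. A minimal sketch of one
# (hypothetical; the original helper is not shown and the crop size is made up).
# Note that a crop returns a smaller tensor than its input, relaxing the
# same-shape contract quoted in the comment above, so the generator's
# target_size must be chosen to match the crop.
import numpy as np

def random_crop(image, crop_size=(96, 96)):
    ch, cw = crop_size
    height, width = image.shape[0], image.shape[1]
    # Pick a random top-left corner so the crop stays inside the image.
    top = np.random.randint(0, height - ch + 1)
    left = np.random.randint(0, width - cw + 1)
    return image[top:top + ch, left:left + cw, :]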
def run(): """ The main method starts the GUI after asking for an api key if necessary. """ if not check_internet_connection_and_obtain_api_key(): return 0 # Create the data folder if it doesn't exist. if not os.path.exists(Config.data_folder): os.makedirs(Config.data_folder) if Config.online: if len(sys.argv) > 1: if sys.argv[1] in {'--test', '-t'}: import test test.run_tests() return 0 elif sys.argv[1] in ('--download-molecule-images', '-dmi'): import res_gen.image_downloader as id id.download_images() return 0 elif sys.argv[1] in ("-gba", "--generate-broadener-availability"): import res_gen.generate_broadener_availability as gba gba.generate_availability() return 0 if Config.high_dpi: # Enable High DPI display with PyQt5 QtWidgets.QApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True) # Fix for mac-based systems... os.environ['no_proxy'] = '*' ## # The following blocks of code verify the hapi API key is in place, and it # is valid. If it is not valid or in place the user will we prompted for # one. This code also checks for a working internet connection, as hapi # needs one to download data. In the future, if there is no internet # connection the GUI should simply disable the features that require it, # and there could be a periodic check for internet connection that will # re-enable them. from metadata.molecule_meta import MoleculeMeta WorkRequest.start_work_process() # Hapi is now started automatically in the work process # start = HapiWorker(WorkRequest.START_HAPI, {}) # start.start() # When a start_hapi request is sent, it starts automatically. _ = MoleculeMeta(0) from metadata.xsc_meta import CrossSectionMeta # If the cache is expired, download a list of the cross section meta file. # This also populates the CrossSectionMeta.molecule_metas field. _ = CrossSectionMeta(0) app = QtWidgets.QApplication(sys.argv) app.setStyle(QStyleFactory.create("Fusion")) window = MainWindow() window.gui.adjustSize() TextReceiver.init(window) _qt_result = app.exec_() TextReceiver.redirect_close() close = HapiWorker(WorkRequest.END_WORK_PROCESS, {}, callback=None) close.safe_exit() WorkRequest.WORKER.process.join() HapiThread.kill_all() return 0
from test import run_tests

run_tests()
def run(self): """Run experiment""" print(self.tmp_dir) run_tests(self.args_tuple)