def run(self):
    import setup
    _old_argv = _sys.argv
    try:
        _sys.argv = ['setup.py', '-q', 'build']
        if not self.HIDDEN:
            _sys.argv.remove('-q')
        setup.setup()
        if 'java' not in _sys.platform.lower():
            _sys.argv = [
                'setup.py', '-q', 'install_lib', '--install-dir',
                shell.native(self.dirs['lib']),
                '--optimize', '2',
            ]
            if not self.HIDDEN:
                _sys.argv.remove('-q')
            setup.setup()
    finally:
        _sys.argv = _old_argv
    for name in shell.files("%s/wtf" % self.dirs['lib'], '*.py'):
        self.compile(name)
    term.write("%(ERASE)s")
    term.green("All files successfully compiled.")
def compile(self, output_file, dry_run=False):
    configuration = self.get_configuration(output_file)
    jconfig = json.dumps(configuration, default=serialize, indent=2)
    if dry_run:
        print(jconfig)
    else:
        setup.setup(json.loads(jconfig))
def database_management(self):
    choices = [
        'Run Setup Script',
        'Create Database',
        'Update Tracked Players',
        'Remove a Clan War',
        'Go Back',
        'Quit'
    ]
    clear()
    banner = pyfiglet.figlet_format('DB Management')
    print(banner)
    ans = get_answer(choices)
    if ans == 'Run Setup Script':
        setup()
    elif ans == 'Create Database':
        create_database()
    elif ans == 'Update Tracked Players':
        update_tracked_players()
    elif ans == 'Remove a Clan War':
        remove_clan_war()
    elif ans == 'Go Back':
        self.main_menu()
    elif ans == 'Quit':
        quit()
    _ = input('\nPress Enter to Continue...')
    self.database_management()
def __init__(self):
    logutils.setup_logging("mustikkabot")
    self.log = logging.getLogger("mustikkabot")

    self.basepath = tools.find_basepath()
    self.confdir = os.path.join(self.basepath, "config")
    self.datadir = os.path.join(self.basepath, "data")
    self.srcdir = os.path.join(self.basepath, "src")

    setup.setup(self)
    setup.do_migrations(self)

    self.ircsock = None
    self.lastReceived = None
    self.user = None
    self.channel = None

    self.eventmanager = EventManager()
    """ :type: EventManager"""
    self.modulemanager = ModuleManager()
    """ :type: ModuleManager"""
    self.accessmanager = AccessManager()
    """ :type: AccessManager"""
    self.timemanager = TimeManager()
    """ :type: TimeManager"""

    self.run = True
def commandstart():
    print(coinlist[0])
    setup(appdirpath, appdatfile, appdatadirpath, appdata, snpy, coinlist, exenames)
    coincontroller = Coincontroller(coinlist, rpcports)
    startcoinservers(coincontroller, exenames, envars, startupstatcheckfreqscnds, appdata)
    conn = coincontroller
    paramslist['getsynctime'] = conn
    # appfilemakeifno()
    while True:
        uinput = str(input('$$')).split()
        if len(uinput) > 0:
            if uinput[0].lower() in listcommands:
                if len(uinput) > 1:
                    if uinput[0].lower() in paramslist:
                        globals()['command' + uinput[0].lower()](paramslist[uinput[0]](uinput[1].lower()))
                        # globals()['command' + uinput[0].lower()](uinput[1].lower())
                    else:
                        print('"' + uinput[1] + '" is not a valid parameter. '
                              'Type help for a list of available commands and parameters. '
                              'Parameters are shown in brackets, e.g. getsynctime [coin] is typed:\n'
                              'getsynctime turbostake')
                else:
                    globals()['command' + uinput[0].lower()]()
            else:
                print('"' + uinput[0] + '" is not a valid command. Type help for a list of available commands.')
        else:
            print('invalid command. Type help for a list of available commands.')
def downloadZip(mes_Min, ano_Min, mes_Max, ano_Max, nome):
    setup.setup()
    end = 1
    while True:
        url = Definitions.definitions(nome)[0]
        if mes_Min <= 9:
            url = url + '/' + str(ano_Min) + '0' + str(mes_Min)
        else:
            url = url + '/' + str(ano_Min) + str(mes_Min)
        print(url)
        request = requests.get(url, allow_redirects=True)
        print(request.headers.get('content-type'))
        if mes_Min <= 9:
            open('Data/00_Zip/' + nome + str(ano_Min) + '-' + '0' + str(mes_Min) + '.zip', 'wb').write(request.content)
            unzipper('Data/00_Zip/' + nome + str(ano_Min) + '-' + '0' + str(mes_Min) + '.zip')
        else:
            open('Data/00_Zip/' + nome + str(ano_Min) + '-' + str(mes_Min) + '.zip', 'wb').write(request.content)
            unzipper('Data/00_Zip/' + nome + str(ano_Min) + '-' + str(mes_Min) + '.zip')
        mes_Min += 1
        if mes_Min == 12:
            ano_Min = ano_Min + 1
            mes_Min = 1
        if end == 0:
            break
        if not (mes_Min != mes_Max or ano_Min != ano_Max):
            end = 0
def on_key_press(self, key, modifiers):
    if key == arcade.key.UP:
        if self.physics_engine.can_jump():
            self.player_sprite.change_y = JUMP_SPEED
    elif key == arcade.key.LEFT:
        self.player_sprite.change_x = -MOVEMENT_SPEED
    elif key == arcade.key.RIGHT:
        self.player_sprite.change_x = MOVEMENT_SPEED
    elif key == arcade.key.SPACE and self.score > 0:
        self.score -= 1
        self.proj = arcade.Sprite("images\\howdy.png", SPRITE_SCALING * 2)
        self.proj.center_x = self.player_sprite._get_center_x()
        self.proj.center_y = self.player_sprite._get_center_y() + 10
        self.proj.boundary_right = self.proj.center_x + SPRITE_SIZE * 100
        self.proj.boundary_left = self.proj.center_x - SPRITE_SIZE * 100
        if self.player_sprite.texture == self.texture_left:
            self.proj.change_x = -10
        else:
            self.proj.change_x = 10
        self.projectiles.append(self.proj)
    elif key == arcade.key.R:
        self.score = 0
        setup.setup(self)
    elif key == arcade.key.X:
        arcade.window_commands.close_window()
def run(self):
    import setup
    _old_argv = _sys.argv
    try:
        _sys.argv = ["setup.py", "-q", "build"]
        if not self.HIDDEN:
            _sys.argv.remove("-q")
        setup.setup()
        if "java" not in _sys.platform.lower():
            _sys.argv = [
                "setup.py", "-q", "install_lib", "--install-dir",
                shell.native(self.dirs["lib"]),
                "--optimize", "2",
            ]
            if not self.HIDDEN:
                _sys.argv.remove("-q")
            setup.setup()
    finally:
        _sys.argv = _old_argv
    for name in shell.files("%s/tdi" % self.dirs["lib"], "*.py"):
        self.compile(name)
    term.write("%(ERASE)s")
    term.green("All files successfully compiled.")
def run():
    if len(sys.argv) <= 1:
        show_help()
    else:
        command = sys.argv[1].lower()
        if command == "courses":
            print_courses()
        elif command == "setup":
            setup()
        elif command == "download":
            course_ids = get_course_choices("Select courses to download")
            if 3 <= len(sys.argv):
                output_dir = os.path.expanduser(sys.argv[2])
            else:
                output_dir = os.getcwd()
            for course_id in course_ids:
                download_files(course_id, output_dir)
        elif command == "sync":
            config = get_config()
            sync(config)
        elif command == "config":
            verify = [{
                "type": "rawlist",
                "message": "Select which variables you want to change",
                "name": "verification",
                "choices": ["Download directory", "Selected courses", "Both", "Exit"]
            }]
            answer = prompt(verify)["verification"]
            if answer == "Exit":
                exit()
            if answer == "Download directory" or answer == "Both":
                directory_question = [{
                    "type": "input",
                    "message": "Enter your preferred download directory. Leave empty to reset.",
                    "name": "directory"
                }]
                directory_answer = prompt(directory_question)
                output_directory = directory_answer["directory"]
                if output_directory == "":
                    output_directory = "~/UforaFileFetcher"
                write_to_config("output_directory", output_directory)
            if answer == "Selected courses" or answer == "Both":
                course_ids = get_course_choices("Select courses to add to config")
                write_to_config("courses", course_ids)
            print(answer)
        else:
            show_help()
def build(): if common.needs_setup(): setup.setup() print("\nGenerating project with CMake") common.exec([ 'cmake', '-S', '.', '-B', GEN_PATH, "-Dgtest_force_shared_crt=ON" ], "Could not generate project") os.chdir('gen/Linux') common.exec(['make'], "Could not build OUI engine") os.chdir('../..') outputFolder = "{}/linux".format(common.OUTPUT_FOLDER) if not os.path.isdir(outputFolder): os.makedirs(outputFolder) print("\nCopying OUI binaries") shutil.copy2("{}/OUI_Runtime".format(GEN_PATH), outputFolder) shutil.copy2("{}/tests/Test-OUI-runtime".format(GEN_PATH), outputFolder) shutil.copy2("{}/libOUI.so".format(OUI_ENGINE_BINARY_PATH), outputFolder) print("\nCopying data folder") if os.path.isdir(outputFolder + '/data'): shutil.rmtree(outputFolder + '/data') shutil.copytree('{}/data'.format(GEN_PATH), outputFolder + '/data')
def main():
    '''Load, edit, and save a family tree'''
    # Don't step on an existing family tree!
    if not os.path.isfile('family.db'):
        setup.setup()
    cmd = ''
    while cmd != 'quit':
        display()
        cmd = raw_input("$ ")
        if cmd == 'insert':
            table = raw_input(" insert where? ")
            insert(table)
        elif cmd == 'show':
            table = raw_input(" show where? ")
            show(table)
        elif cmd == 'quit':
            print 'bye!'
        elif cmd == 'build':
            associate()
        elif cmd == 'search':
            table = raw_input(" search where? ")
            value = raw_input(" search what? ")
            user_search(table, value)
            raw_input(" \n any key to clear results")
        else:
            print "not implemented"
            time.sleep(2)
        os.system('clear')  # clear the screen
        pass                # and execute the command
def exec_command(cluster, run, command, **kwargs):
    log.info('exec_command: {0}'.format(command))
    action = command.split(' ', 1)
    result = 0
    if action[0] == 'sh':
        result = cmd(action[1])[0]
    elif action[0] == 'create':
        env.is_local = True
        env.host = 'localhost'
        container.create(cluster[action[1]])
        env.is_local = False
    elif action[0] == 'delete':
        env.is_local = True
        env.host = 'localhost'
        container.delete(cluster[action[1]])
        env.is_local = False
    elif action[0] == 'setup':
        env.runs = [run]
        env.user = CONF.job_user
        env.password = CONF.job_password
        CONF.user = CONF.job_user
        CONF.password = CONF.job_password
        setup(**kwargs)
    elif action[0] == 'manage':
        env.runs = [run]
        env.user = CONF.job_user
        env.password = CONF.job_password
        CONF.user = CONF.job_user
        CONF.password = CONF.job_password
        manage(action[1], **kwargs)
    log.info('result_command: {0}({1})'.format(command, result))
    return result
def wrapper(*args, **kwargs):
    mock_client = mongomock.MongoClient()
    setup.client = mock_client
    setup.setup(quiet=True)
    schema.db = mock_client.harry_potter_trivia
    return func(*args, **kwargs)
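# A minimal sketch (not from the original source) of how a wrapper like the one
# above is typically applied as a decorator. The decorator name `with_mock_db`,
# the test body, and the `scratch` collection are assumptions for illustration;
# `setup` and `schema` are the project's own modules as used in the snippet.
import functools

import mongomock

import schema
import setup


def with_mock_db(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Point the project's globals at an in-memory mongomock database
        # before running the wrapped test.
        mock_client = mongomock.MongoClient()
        setup.client = mock_client
        setup.setup(quiet=True)
        schema.db = mock_client.harry_potter_trivia
        return func(*args, **kwargs)
    return wrapper


@with_mock_db
def test_writes_go_to_the_in_memory_db():
    # Any writes land in mongomock, so no real MongoDB instance is touched.
    schema.db.scratch.insert_one({"probe": True})
    assert schema.db.scratch.find_one({"probe": True}) is not None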
def reset_config(option=True):
    stp.clean_folders(warning=0)
    if option:
        stp.setup()
    else:
        # update requirements for packaging prior to upload to GitHub
        stp.enhance_requirements()
def run(self):
    import setup
    _old_argv = _sys.argv
    try:
        _sys.argv = ['setup.py', '-q', 'build']
        if not self.HIDDEN:
            _sys.argv.remove('-q')
        setup.setup()
        if 'java' not in _sys.platform.lower():
            _sys.argv = [
                'setup.py', '-q', 'install_lib', '--install-dir',
                shell.native(self.dirs['lib']),
                '--optimize', '2',
            ]
            if not self.HIDDEN:
                _sys.argv.remove('-q')
            setup.setup()
    finally:
        _sys.argv = _old_argv
    self.compile('rcssmin.py')
    term.write("%(ERASE)s")
    term.green("All files successfully compiled.")
def index():
    if not os.path.isfile(os.path.join(constants.getIndexPath(), 'classes')):
        setup()

    dataDir = constants.getIndexPath()
    dirIndexName = constants.getDirIndexName()
    fileIndexName = constants.getFileIndexName()
    dirIndexNameTemp = dirIndexName + "_temp"
    fileIndexNameTemp = fileIndexName + "_temp"
    dirIndexFilePath = os.path.join(dataDir, dirIndexNameTemp)
    fileIndexFilePath = os.path.join(dataDir, fileIndexNameTemp)

    folders = []
    files = []
    rootDirs = constants.getRoots()
    for root in rootDirs:
        for dirName, subdirList, fileList in os.walk(root):
            # do not proceed if it's a hidden directory
            if constants.hidden(dirName):
                continue
            dirName = dirName.encode('ascii', "ignore")
            dirName = dirName.decode('ascii', "ignore")
            dirName = str(dirName)
            folders.append(dirName)
            # for f in fileList:
            #     f = f.encode('ascii', "ignore")
            #     f = f.decode('ascii', "ignore")
            #     f = str(f)
            #     if 'linux' in plt:
            #         f = dirName + "/" + f
            #     else:
            #         f = dirName + "\\" + f
            #     files.append(f)
            # for dir in subdirList[:]:
            #     if constants.hidden(dir):
            #         subdirList.remove(dir)

    folders.sort(key=lambda s: len(s))
    # files.sort(key=lambda s: len(s))

    file = open(dirIndexFilePath, 'w')
    for f in folders:
        file.write("%s\n" % f)
    file.close()

    # file = open(fileIndexFilePath, 'w')
    # for f in files:
    #     file.write(f + '\n')
    # file.close()

    utils.deleteAndRename(dataDir, dirIndexName, dirIndexNameTemp)
def build():
    if common.needs_setup():
        setup.setup()

    debug = False
    if "-D" in sys.argv or "--debug" in sys.argv:
        debug = True

    # Will show our "no Visual Studio" error instead of CMake's
    msbuild_location = find_ms_build()
    version = get_visual_studio_version()
    build_type = "Debug" if debug else "Release"

    print("\nGenerating project with CMake")
    common.exec([
        'cmake', '-G', VISUAL_STUDIO_VERSION_TO_GENERATOR[version],
        '-S', '.', '-B', GEN_PATH,
        "-DSDL2_PATH='{}'".format(SDL2_PATH),
        "-DSDL2_IMAGE_PATH='{}'".format(SDL2_IMAGE_PATH),
        "-DSDL2_TTF_PATH='{}'".format(SDL2_TTF_PATH),
        "-Dgtest_force_shared_crt=ON",
        "-DCMAKE_BUILD_TYPE={}".format(build_type.upper())
    ], "Could not generate project\nAre Visual Studio C++ tools and CMake 3.14+ installed?")

    print("\nBuilding project with MSBuild.exe")
    common.exec([
        '{}'.format(msbuild_location),
        '{}/ALL_BUILD.vcxproj'.format(GEN_PATH),
        '/p:Configuration={}'.format(build_type),
        '/p:Platform=x64'
    ], "Could not build project")

    outputFolder = "{}/windows/{}".format(common.OUTPUT_FOLDER, "debug" if debug else "release")

    print('\nCopying OUI headers')
    if os.path.isdir("{}/include".format(common.OUTPUT_FOLDER)):
        shutil.rmtree("{}/include".format(common.OUTPUT_FOLDER))
    shutil.copytree("include", "{}/include".format(common.OUTPUT_FOLDER))

    print("\nCopying OUI binaries")
    file_util.copyAllWithExt(path='{}/{}'.format(GEN_PATH, build_type), ext='dll', outputPath=outputFolder)
    file_util.copyAllWithExt(path='{}/{}'.format(GEN_PATH, build_type), ext='lib', outputPath=outputFolder)
    file_util.copyAllWithExt(path='{}/tests/{}'.format(GEN_PATH, build_type), ext='exe', outputPath=outputFolder)

    print("\nCopying SDL binaries")
    file_util.copyAllWithExt(path='{}/windows'.format(common.LIB_PATH), ext='dll', outputPath=outputFolder, excludeFolders=['x86'])

    print("\nFinished build")
def main():
    updater: Updater = Updater(token=botToken)
    dispatcher: Dispatcher = updater.dispatcher
    setup.setup(dispatcher)
    updater.start_polling()
    updater.idle()
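# A hedged sketch (assumption, not taken from the original project) of what a
# setup.setup(dispatcher) helper might register for the python-telegram-bot v13
# API used above; the command name and callback are illustrative only.
from telegram import Update
from telegram.ext import CallbackContext, CommandHandler, Dispatcher


def _start(update: Update, context: CallbackContext) -> None:
    # Reply to /start so the bot is verifiably alive.
    update.message.reply_text("Bot is up and running.")


def setup(dispatcher: Dispatcher) -> None:
    # Attach all command handlers in one place, as main() above expects.
    dispatcher.add_handler(CommandHandler("start", _start))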
def set_webhook():
    setup()
    s = bot.setWebhook('https://telegram-bot-rest.appspot.com/HOOK')
    # s = bot.setWebhook('https://telegram-bot-rest.appspot.com/HOOK')
    if s:
        return "webhook setup ok"
    else:
        return "webhook setup failed"
def run(opts):
    if opts.get('--fid-calculate', None):
        calculate_fid(opts)
        return
    if opts.get('--build-exe', None):
        setup(opts)
        return
def __init__(self, parent=None):
    super(Calculator, self).__init__(parent)
    QApplication.setStyle(QStyleFactory.create('Fusion'))
    self.make_power_enabled = False
    self.ui.setupUi(self)
    setup(self, app.get_resource('backspace.png'))
    self.exp = Expression()
    self.on_ac_click()
def test2():
    "with index on 'country'"
    cleanup(get_postgres_connection())
    setup(get_postgres_connection())
    run_queries(get_postgres_connection(), index_queries_gin)
    get_mongo_client().test.test.ensure_index("country")
    results = [0.0, 0.0, 0.0, 0.0]
    test_base(1000, results)
    print "test 2 - insert with GIN (+mongo index): ", results
def main():
    filename = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
    config.read(filename + '/data.ini')
    if int(config['SETUP']['first_time']) == 1:
        setup.setup()
        exit()

    # authenticating the user
    global hashed_pwd
    hashed_pwd = hashed_pass(getpass.getpass(prompt='Password for script: '))
    if unpad(bytes.decode(decrypt(config['SETUP']['check']))) == "dictionary_check":
        # if less than one argument
        if len(sys.argv) <= 1:
            sys.argv.append('--help')

        # Main script
        parser = argparse.ArgumentParser(description='A Command line password manager')
        parser.set_defaults(func=lambda x: parser.print_usage())
        parser.add_argument(
            '-a', '--add', nargs='?', action='store',
            help='Add a new account. Just provide the unique account-name along with it')
        parser.add_argument(
            '-g', '--get', nargs='?', action='store',
            help='Copies the password of username passed as argument to your clipboard')
        parser.add_argument(
            '-l', '--list', nargs='?', default='all', const='all',
            help='List usernames of accounts already added')
        args = parser.parse_args()

        # calling functions
        if args.add:
            adduser(args.add)
        elif args.get:
            retrieve(args.get)
        elif args.list:
            listall()
    else:
        print("Wrong password!!")
def check_setup():
    try:
        imp.find_module('dhtreader')
        found = True
    except ImportError:
        found = False
    if found is False:
        import setup
        setup.setup()
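# A hedged sketch (not part of the original code) of the same availability check
# written with importlib, since the imp module used above is deprecated on
# Python 3. The module name 'dhtreader' comes from the snippet; the rest is
# illustrative only.
import importlib.util


def check_setup_importlib():
    # find_spec() returns None when the top-level module cannot be located.
    if importlib.util.find_spec('dhtreader') is None:
        import setup
        setup.setup()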
def main():
    setup()
    print('Welcome to Jenkins CLI')
    while True:
        answer = _read_input()
        if answer.startswith('status'):
            status(answer)
        elif answer.startswith('build'):
            deploy(answer)
        else:
            print('action n0t found')
def main():
    opts = parser.parse_args()

    # shared settings
    if opts.root:
        root = opts.root
    else:
        root = os.getcwd()
    if opts.config:
        config = Config(opts.config)
    else:
        default_config = os.path.join(root, "config")
        if os.path.isfile(default_config):
            config = Config(default_config)
        else:
            config = Config()

    # subcommands
    if "setup" == opts.subcommand:
        setup(root)
    elif "update-repos" == opts.subcommand:
        org = opts.org or config.organisation_name
        if org is None:
            update_repos_parser.print_help()
            sys.exit(1)
        token = opts.token or config.github_token
        if opts.file is None:
            filename = os.path.join(root, "repository-urls")
        else:
            filename = opts.file
        update_organisation_repos(org, token, filename)
    elif "scan-repo" == opts.subcommand:
        repo_location = opts.location
        scan_repo(repo_location, root)
    elif "scan-repo-list" == opts.subcommand:
        if opts.file is None:
            repo_urls_filename = os.path.join(root, "repository-urls")
        else:
            repo_urls_filename = opts.file
        scan_repo_list(repo_urls_filename, root)
    elif "history" == opts.subcommand:
        history_message = opts.message or "Dupin search results"
        history(root, history_message, opts.notify, config)
    elif "auto-scan-all" == opts.subcommand:
        repo_urls_filename = os.path.join(root, "repository-urls")
        scan_repo_list(repo_urls_filename, root)
        history(root, "Dupin search results", opts.notify, config)
    return
def resetPrpr():
    """
    Removes all files from working directories, invokes prpr setup.
    """
    os.remove('prpr.db')
    dirs = ['esc', 'incoming', 'logs', 'tables']
    for dir in dirs:
        files = os.listdir(dir)
        for file in files:
            os.remove(dir + os.sep + file)
    import setup
    setup.setup()
def setUp(self):
    web_ca.app.config['TESTING'] = True
    web_ca.app.config['WTF_CSRF_ENABLED'] = False
    self.workdir = tempfile.mkdtemp()
    setup.setup(self.workdir)
    web_ca.app.config['WEB_CA_WORK_DIR'] = self.workdir
    shutil.copy('test/ca.crt', self.workdir)
    shutil.copy('test/ca.key', self.workdir)
    self.app = web_ca.app.test_client()
def execute(self, context):
    # <self.logger> may be set in <setup(..)>
    self.logger = None
    # defines for which layerId's material is set per item instead of per layer
    self.materialPerItem = set()
    scene = context.scene
    kwargs = {}

    # setting active object if there is no active object
    if context.mode != "OBJECT":
        # if there is no object in the scene, only "OBJECT" mode is provided
        if not context.scene.objects.active:
            context.scene.objects.active = context.scene.objects[0]
        bpy.ops.object.mode_set(mode="OBJECT")

    if not self.app.has(Keys.mode3d):
        self.mode = '2D'

    # manager (derived from manager.Manager) performing some processing
    self.managers = []

    self.prepareLayers()
    if not len(self.layerIndices):
        self.layered = False

    osm = Osm(self)
    setup(self, osm)

    if "lat" in scene and "lon" in scene and not self.ignoreGeoreferencing:
        kwargs["projection"] = TransverseMercator(lat=scene["lat"], lon=scene["lon"])
    else:
        kwargs["projectionClass"] = TransverseMercator

    osm.parse(**kwargs)
    self.process()
    self.render()

    # setting 'lon' and 'lat' attributes for <scene> if necessary
    if not "projection" in kwargs:
        # <kwargs["lat"]> and <kwargs["lon"]> have been set in osm.parse(..)
        scene["lat"] = osm.lat
        scene["lon"] = osm.lon

    if not self.app.has(Keys.mode3d):
        self.app.show()

    return {"FINISHED"}
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('command_name')
    parser.add_argument('command_args', nargs='*')
    args = parser.parse_args()

    command_name = args.command_name
    command_args = args.command_args

    if command_name == "setup":
        generate_bulbsconf()
        return setup(command_args)

    if command_name == "bulbsconf":
        return generate_bulbsconf()

    config = Config()

    # try to import graph from the local bulbsconf if it exists
    path = config.working_dir
    sys.path.insert(0, path)
    from bulbsconf import graph

    command = Command(config, graph)
    command._execute(command_name, command_args)
def main():
    # SETUP
    questionPath, answersPath, returnPath, databasePath, testName, amount = setup.setup()

    # module1: import test from file
    questions, answers = import_test.collect_data(questionPath, answersPath)
    # export_tests.remove(databasePath)
    # database.createDatabase(databasePath)
    # database.appendData(testName, questions, answers, databasePath)
    # names = database.getListOfTests(databasePath)
    # print(names)
    # questions2, answers2 = database.getFromDatabase(testName, databasePath)

    # module2: generate single test_group + answer_group + correct answers
    # test_group, answer_group, correct_answers = create_test_group(questions2, answers2)

    # module2+3
    print(questions)
    print(answers)
    # export_tests.remove(returnPath)
    tests = generate_tests.create_tests(2, questions, answers, 3)
def main(): """Entry point to the Analytics API Load Tests.""" log.setLevel(log.INFO) cli_arguments = cli_parser.parse_args() if cli_arguments.version: show_version() sys.exit(0) else: cfg = setup(cli_arguments) coreapi_url = os.environ.get('F8A_SERVER_API_URL', None) component_analysis = ComponentAnalysis(coreapi_url, cfg["access_token"], cfg["user_key"], True) stack_analysis = StackAnalysis(coreapi_url, cfg["access_token"], cfg["user_key"], True) check_system(component_analysis) try: tests = read_csv_as_dicts(cfg["input_file"]) except Exception as e: log.error("Test description can not be read") log.error(e) sys.exit(0) t1 = time() tags = cfg["tags"] start_tests(cfg, tests, tags, component_analysis, stack_analysis) t2 = time() log.info("Start time: {}".format(t1)) log.info("End time: {}".format(t2)) log.info("Duration: {}".format(t2 - t1))
def main():
    parser = argparse.ArgumentParser(description='Training fully-connected and convolutional networks using backpropagation (BP), feedback alignment (FA), direct feedback alignment (DFA), and direct random target projection (DRTP)')
    # General
    parser.add_argument('--cpu', action='store_true', default=False, help='Disable CUDA and run on CPU.')
    # Dataset
    parser.add_argument('--dataset', type=str, choices=['regression_synth', 'classification_synth', 'MNIST', 'CIFAR10', 'CIFAR10aug'], default='MNIST', help='Choice of the dataset: synthetic regression (regression_synth), synthetic classification (classification_synth), MNIST (MNIST), CIFAR-10 (CIFAR10), CIFAR-10 with data augmentation (CIFAR10aug). Synthetic datasets must have been generated previously with synth_dataset_gen.py. Default: MNIST.')
    # Training
    parser.add_argument('--train-mode', choices=['BP', 'FA', 'DFA', 'DRTP', 'sDFA', 'shallow'], default='DRTP', help='Choice of the training algorithm - backpropagation (BP), feedback alignment (FA), direct feedback alignment (DFA), direct random target propagation (DRTP), error-sign-based DFA (sDFA), shallow learning with all layers freezed but the last one that is BP-trained (shallow). Default: DRTP.')
    parser.add_argument('--optimizer', choices=['SGD', 'NAG', 'Adam', 'RMSprop'], default='NAG', help='Choice of the optimizer - stochastic gradient descent with 0.9 momentum (SGD), SGD with 0.9 momentum and Nesterov-accelerated gradients (NAG), Adam (Adam), and RMSprop (RMSprop). Default: NAG.')
    parser.add_argument('--loss', choices=['MSE', 'BCE', 'CE'], default='BCE', help='Choice of loss function - mean squared error (MSE), binary cross entropy (BCE), cross entropy (CE, which already contains a logsoftmax activation function). Default: BCE.')
    parser.add_argument('--freeze-conv-layers', action='store_true', default=False, help='Disable training of convolutional layers and keeps the weights at their initialized values.')
    parser.add_argument('--fc-zero-init', action='store_true', default=False, help='Initializes fully-connected weights to zero instead of the default He uniform initialization.')
    parser.add_argument('--dropout', type=float, default=0, help='Dropout probability (applied only to fully-connected layers). Default: 0.')
    parser.add_argument('--trials', type=int, default=1, help='Number of training trials. Default: 1.')
    parser.add_argument('--epochs', type=int, default=100, help='Number of training epochs. Default: 100.')
    parser.add_argument('--batch-size', type=int, default=100, help='Input batch size for training. Default: 100.')
    parser.add_argument('--test-batch-size', type=int, default=1000, help='Input batch size for testing. Default: 1000.')
    parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate. Default: 1e-4.')
    # Network
    parser.add_argument('--topology', type=str, default='CONV_32_5_1_2_FC_1000_FC_10', help='Choice of network topology. Format for convolutional layers: CONV_{output channels}_{kernel size}_{stride}_{padding}. Format for fully-connected layers: FC_{output units}.')
    parser.add_argument('--conv-act', type=str, choices={'tanh', 'sigmoid', 'relu'}, default='tanh', help='Type of activation for the convolutional layers - Tanh (tanh), Sigmoid (sigmoid), ReLU (relu). Default: tanh.')
    parser.add_argument('--hidden-act', type=str, choices={'tanh', 'sigmoid', 'relu'}, default='tanh', help='Type of activation for the fully-connected hidden layers - Tanh (tanh), Sigmoid (sigmoid), ReLU (relu). Default: tanh.')
    parser.add_argument('--output-act', type=str, choices={'sigmoid', 'tanh', 'none'}, default='sigmoid', help='Type of activation for the network output layer - Sigmoid (sigmoid), Tanh (tanh), none (none). Default: sigmoid.')

    args = parser.parse_args()

    (device, train_loader, traintest_loader, test_loader) = setup.setup(args)
    train.train(args, device, train_loader, traintest_loader, test_loader)
def test_config_extra_white_space(self):
    base_path = os.getcwd()
    resource_path = os.path.join(base_path, 'resources')
    project_path = os.path.abspath(os.path.join('..', 'testproject'))
    args = [
        '-c', os.path.join(base_path, 'tests', 'test_run_no_sta_whitespace.cfg'),
        '-f',
        '-n',
        '-r', resource_path
    ]
    display_event = threading.Event()
    thread_kill_event = threading.Event()
    mutex = threading.Lock()
    event_list = Event_list()
    thread_list = []

    config, filemanager, runmanager = setup(
        args,
        display_event,
        event_list=event_list,
        thread_list=thread_list,
        kill_event=thread_kill_event,
        mutex=mutex)

    self.assertTrue(isinstance(config, dict))
    self.assertEqual(
        config['global']['source_path'],
        '/global/homes/r/renata/ACME_simulations/20171011.beta2_FCT2-icedeep_branch.A_WCYCL1850S.ne30_oECv3_ICG.edison/')
def evaluate(config):
    # SETUP
    components = setup.setup(config)
    image_info = components["image_info"]
    heads_info = components["heads_info"]
    output_files = components["output_files"]
    state_folder = components["state_folder"]
    image_folder = components["image_folder"]
    net = components["net"]

    model = Model.load(state_folder, net=net, use_best_net=True)

    preprocessing = pre.SimplePreprocessing(
        image_info=image_info,
        prescale_all=config.preprocessor.prescale_all,
        prescale_factor=config.preprocessor.prescale_factor,
    )
    preprocessor = pre.EvalImagePreprocessor(
        image_info=image_info,
        preprocessing=preprocessing,
        output_files=output_files,
        do_render=config.output.rendering.enabled,
        render_limit=config.output.rendering.limit,
    )
    eval_dataset = data.EvalDataset(
        eval_folder=image_folder,
        preprocessor=preprocessor,
        input_size=heads_info.input_size,
        extensions=config.dataset.extensions,
    )
    eval_dataloader = data.EvalDataLoader(dataset=eval_dataset)

    model.evaluate(output_files=output_files, loader=eval_dataloader)
def cli_setup(argv):
    """Command-line interface for setting up the remote environment."""
    parser = argparse.ArgumentParser(
        prog="bazel_bf setup",
        description="""
      Set up the remote environment.

      Specify --region, --s3_bucket and --s3_key to specify a remote config for
      the first time. (After bazel_bf setup has been called, this info is stored
      in the local config file "~/.bazel_bf/config.json").
      """)
    parser.add_argument("--region", type=str)
    parser.add_argument("--s3_bucket", type=str)
    parser.add_argument("--s3_key", type=str)
    args = parser.parse_args(argv)
    if args.region or args.s3_bucket or args.s3_key:
        if not args.region or not args.s3_bucket or not args.s3_key:
            raise CommandLineException(
                "for initial setup, --region, --s3_bucket and --s3_key are all mandatory")
        config.write_local_config(
            region=args.region, s3_bucket=args.s3_bucket, s3_key=args.s3_key)
    lambda_config = config.read_config()
    next_lambda_config = setup.setup(lambda_config)
    config.write_config(next_lambda_config)
def main(): """ Main function. All operations start from within here """ env, parameter = setup() tf.reset_default_graph() #model_vars #W, b, input, Qout, predict, nextQ, loss, trainer, updateModel = init_graph() W, b, forward_dict, loss_dict = init_graph() # forward_dict = {"input":input, # "Qout":Qout, # "predict":predict # } # loss_dict = {"nextQ": nextQ, # "loss": loss, # "trainer":trainer, # "updateModel":updateModel # } saver = setup_saver(W, b) reward_list, steps_list = train(env=env, parameter=parameter, saver=saver, forward_dict=forward_dict, loss_dict=loss_dict)
def setup(cmdline):
    survey = model.survey.Survey.new(cmdline['project'])
    # Cleanup of options.
    if cmdline['global_id'] == '':
        cmdline['global_id'] = None
    import setup
    return setup.setup(survey, cmdline)
def __init__(self):
    conn = pymongo.Connection("localhost", 27017)  # This creates a connection to local pymongo
    self.db = conn["blog"]
    if not self.db.posts.find_one({"title": "root"}):  # If the blog is newly installed, root should not exist
        setup.setup(conn["blog"])                      # and so it will be created
    handlers = routes
    sidebar = get_archive_and_categories(self.db)       # generate the data for sidebar
    static_pages_list = get_static_pages_list(self.db)  # and the static pages on navigation bar
    page_data["sidebar"] = sidebar                      # then fill the page_data
    page_data["static_pages_list"] = static_pages_list  # It's expected that page_data will not change very often
    page_data["blog_title"] = conf.blog_title           # Man! less database calls!
    settings = dict(template_path=os.path.join(os.path.dirname(__file__), "templates"),
                    static_path=os.path.join(os.path.dirname(__file__), "static"),
                    debug=conf.debug,)
    tornado.web.Application.__init__(self, handlers, **settings)
def app_factory(**local_conf):
    settings = setup.setup(**local_conf)
    app = App(settings)
    app = werkzeug.wsgi.SharedDataMiddleware(app, {
        '/css': os.path.join(settings.static_file_path, 'css'),
        '/js': os.path.join(settings.static_file_path, 'js'),
        '/img': os.path.join(settings.static_file_path, 'img'),
    })
    return app
def main():
    setup()
    try:
        print('Welcome to Jenkins CLI')
        while True:
            answer = _read_input()
            if answer.startswith('status'):
                status(answer)
            elif answer.startswith('build'):
                deploy(answer)
            elif answer.startswith('stop'):
                stop(answer)
            elif answer.startswith('exit'):
                raise ValueError('Seeya :)')
            else:
                print('action n0t found')
    except (KeyboardInterrupt, SystemExit, ValueError):
        print('Bye :)')
def main():
    # Main loop brings all of the functionality together
    print "Initializing..."
    if SETUP == True:
        setup.setup()
    time.sleep(5)
    loopCount = 0
    while True:
        setup.scan()
        data = readData()
        display(data)
        loopCount = loopCount + 1
        if loopCount >= loopLimit:
            if shutdown == True:
                followers.shutdownDisplay()
                print "SHUT DOWN!"
                os.system("sudo shutdown -h now")
            else:
                loopCount = 0
def prepareSourceTree(srcdir, customerRelease=False):
    if not customerRelease:
        # import setup.py from src/build_system
        buildsystemdir = os.path.join(srcdir, "build_system")
        if sys.path[0] != buildsystemdir:
            if buildsystemdir in sys.path:
                sys.path.remove(buildsystemdir)
            sys.path.insert(0, buildsystemdir)
        import setup
        setup.setup(srcdir)

    # Run autogen.sh
    if sys.platform != "win32":
        origDir = os.getcwd()
        # autogen.sh in the source dir
        try:
            utils.changeDir(srcdir)
            utils.runCommand('sh build_system/unix/autogen.sh')
        finally:
            utils.changeDir(origDir)
def initUI(self):
    self.setGeometry(500, 300, 800, 600)
    self.setWindowTitle('varDB GUI')

    # Create option buttons
    addButton = QtGui.QPushButton("Add Variable")
    addButton.pressed.connect(lambda: newSig(self))
    self.saveButton = QtGui.QPushButton("Save Changes")
    self.deleteButton = QtGui.QPushButton("Delete Selected variable")
    quitButton = QtGui.QPushButton("Quit")
    quitButton.clicked.connect(QtCore.QCoreApplication.instance().quit)

    # Creates left and right sections, search bar, scroll area
    hbox = QtGui.QHBoxLayout(self)
    self.left = QtGui.QFrame(self)
    self.upright = QtGui.QFrame(self)
    self.downright = QtGui.QFrame(self)
    self = setup(hbox, self, "VDB.xml")
    self.searchText.textEdited.connect(lambda: generateButtons(self, str(self.searchText.text()), 0))
    self.setLayout(hbox)

    # Populates scroll area
    generateButtons(self, '', -1)

    # Option buttons layout
    hlox = QtGui.QHBoxLayout()
    hlox2 = QtGui.QHBoxLayout()
    hlox.addStretch(1)
    hlox.addWidget(addButton, 1)
    hlox.addWidget(self.saveButton, 1)
    hlox.addWidget(self.deleteButton, 1)
    hlox.addWidget(quitButton, 1)
    hlox.addStretch()
    vbox = QtGui.QVBoxLayout()
    vbox.addStretch(1)
    vbox.addLayout(hlox)
    vbox.addLayout(hlox2)
    self.downright.setLayout(vbox)
from auth import getTwitter
from setup import setup
import time
import datetime
import csv
import json
import sqlite3
import signal
import sys
import os

# create DB if it does not exist
if not os.path.isfile(os.getenv('PWD') + '/twitter.db'):
    print 'creating DB'
    setup()

# Connect to DB
conn = sqlite3.connect('twitter.db')


def signal_handler(signal, frame):
    # Close connection on interrupt
    conn.close()
    sys.stdout.flush()
    sys.exit(0)


signal.signal(signal.SIGINT, signal_handler)

twitter = getTwitter()
def setUp(self):
    self.set = setup()
def app_factory(global_config, **local_conf):
    settings = setup.setup(**local_conf)
    return App(settings)
def main():
    port = 9992
    app = App(setup.setup())
    return webserver(app, port)
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4

# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.

from twisted.internet import reactor

import setup
setup.setup()

# from flumotion.common import log
# log.logTwisted()
def setup_tex(cmdline):
    survey = model.survey.Survey.new(cmdline['project'])
    import setup
    return setup.setup(survey, cmdline)
def test_identifiString_bad(self):
    formatStrings = ["logs_%x", "logs_%X", "logs_%c"]
    set = setup()
    for format in formatStrings:
        self.assertEquals("", self.set.identifiString(format),
                          "Setup, bad time string based log table naming test failed!")
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
A local django test with synced database but empty tables.
"""

from setup import setup

setup(
    path_info=False,
    extra_verbose=False,
    syncdb=True,
    insert_dump=False
)

#______________________________________________________________________________
# Test:

from django.db.models import get_apps, get_models

for app in get_apps():
    print "%s:" % app.__name__
    for model in get_models(app):
        print " * %s" % model._meta.object_name
    print

#from PyLucid.models import Plugin, Markup, PagesInternal
#
#plugin = Plugin.objects.create()
#print "plugin ID:", plugin.id
#
import SETTINGS

import logging

import sqlaload as sl

from extract import extract
from entities import create_entities, update_entities
from load import load
from setup import setup, make_grano
from transform import transform
from network_entities import update_network_entities

if __name__ == '__main__':
    import sys
    logging.basicConfig(level=logging.DEBUG)
    assert len(sys.argv) == 3, "Usage: %s [ir_source_file] [ap_source_file]" % sys.argv[0]
    ir_source_file = sys.argv[1]
    ap_source_file = sys.argv[2]
    engine = sl.connect(SETTINGS.ETL_URL)
    extract(engine, ir_source_file, ap_source_file)
    update_network_entities(engine, 'network_entities.csv')
    create_entities(engine)
    update_entities(engine, 'entities.csv')
    transform(engine)
    grano = make_grano()
    setup(engine, grano)
    load(engine, grano)
def do_setup(self, arg):
    '''Setup your virtualcloud account with Dropbox and/or Google Drive.
    Add and remove accounts.
    This must be run at least once for anything else to work.
    '''
    setup()
def setup_handlers(self, map):
    """setup the mapper"""
    map.connect(None, "/css/{path_info:.*}", handler=CSSResourceHandler)
    map.connect(None, "/js/{path_info:.*}", handler=JSResourceHandler)
    map.connect(None, "/img/{path_info:.*}", handler=StaticHandler)
    map.connect(None, "/extensions/{path_info:.*}", handler=StaticHandler)
    map.connect(None, "/", handler=Page)
    map.connect(None, "/{page}", handler=Page)
    self.logger = Logger('app')
    self.pts = Environment(loader=PackageLoader("frontend", "http/pages"))


def main():
    port = 7652
    app = App(setup.setup())
    return webserver(app, port)


def frontend_factory(global_config, **local_conf):
    settings = setup.setup(**local_conf)
    return App(settings)


def webserver(app, port):
    import wsgiref.simple_server
    wsgiref.simple_server.make_server('', port, app).serve_forever()


if __name__ == "__main__":
    main()
else:
    settings = setup.setup()
    app = App(settings)
import os
import re
import sys
import webbrowser
from datetime import datetime as dt

from setup import setup; setup()

import framework
from framework.TestCase import TestCase

# Allow command-line selection of tests to run
if len(sys.argv) > 1:
    tests_to_run = sys.argv[1:]
    framework.TESTS = filter(lambda x: x[0] in tests_to_run, framework.TESTS)

# Time of test
running_time = dt.now()

# Report Name
report_name = "report_%s.html" % running_time.strftime("%Y%m%d%H%M%S")

# Open and fill in the top part of the reports template and
# write it to results.html
f = open(framework.ROOT_DIR + '/framework/assets/templates/top.html', 'r')
top = f.read() % (running_time.strftime("%x at %X"), running_time.strftime("%x at %X"))
#top = top.replace("{ROOT_DIR}", framework.ROOT_DIR)
f.close()

f = open(framework.REPORTS_DIR + report_name, 'w')
f.write(top)
f.close()
def run_exp(replace_params={}):
    # READ PARAMETERS AND DATA
    params = setup(replace_params)
    t1Data, t1Label, t2Data, t2Label, vData, vLabel, testD, testL = read_preprocess(params=params)
    np.savez('preprocessed_cifar.npz', X_train=t1Data, Y_train=t1Label, X_t2=t2Data, Y_t2=t2Label,
             X_v=vData, Y_v=vLabel, X_test=testD, Y_test=testL)
    return

    # random numbers
    rng = np.random.RandomState(params.seed)
    rstream = RandomStreams(rng.randint(params.seed+1)+1)

    ''' Construct Theano functions '''
    # INPUTS
    useRglrz = T.fscalar('useRglrz')
    bnPhase = T.fscalar('bnPhase')
    if params.model == 'convnet':
        x = T.ftensor4('x')
    else:
        x = T.matrix('x')
    trueLabel = T.ivector('trueLabel')
    globalLR1 = T.fscalar('globalLR1')
    globalLR2 = T.fscalar('globalLR2')
    moment1 = T.fscalar('moment1')
    moment2 = T.fscalar('moment2')

    # NETWORK
    if params.model == 'convnet':
        model = convnet(rng=rng, rstream=rstream, x=x, wantOut=trueLabel,
                        params=params, useRglrz=useRglrz, bnPhase=bnPhase)
    else:
        model = mlp(rng=rng, rstream=rstream, x=x, wantOut=trueLabel,
                    params=params, useRglrz=useRglrz, bnPhase=bnPhase)

    # UPDATES
    updateT1, updateT2, updateC2grad, grads = updates(mlp=model, params=params,
                                                      globalLR1=globalLR1, globalLR2=globalLR2,
                                                      momentParam1=moment1, momentParam2=moment2)
    updateBN = []
    if params.batchNorm:
        for param, up in zip(model.paramsBN, model.updateBN):
            updateBN += [(param, up)]

    updateT1 = theano.function(
        inputs=[x, trueLabel, globalLR1, moment1, useRglrz, bnPhase],
        outputs=[model.trainCost, model.guessLabel] + grads,
        updates=updateT1 + updateBN,
        # mode=theano.compile.MonitorMode(post_func=detect_nan),
        on_unused_input='ignore',
        allow_input_downcast=True)
    updateT2part1 = theano.function(
        inputs=[x, trueLabel, globalLR1, moment1, useRglrz, bnPhase],
        outputs=[model.trainCost, model.guessLabel] + grads,
        updates=updateC2grad,
        # mode=theano.compile.MonitorMode(post_func=detect_nan),
        on_unused_input='ignore',
        allow_input_downcast=True)
    updateT2part2 = theano.function(
        inputs=[x, trueLabel, globalLR1, moment1, globalLR2, moment2, useRglrz, bnPhase],
        outputs=[model.trainCost, model.guessLabel] + grads,
        updates=updateT2,
        # mode=theano.compile.MonitorMode(post_func=detect_nan),
        on_unused_input='ignore',
        allow_input_downcast=True)
    evaluate = theano.function(
        inputs=[x, trueLabel, useRglrz, bnPhase],
        outputs=[model.trainCost, model.guessLabel, model.penalty, model.netStats],
        on_unused_input='ignore',
        allow_input_downcast=True)
    evaluateBN = theano.function(
        inputs=[x, useRglrz, bnPhase],
        updates=updateBN,
        on_unused_input='ignore',
        # mode=theano.compile.MonitorMode(post_func=detect_nan),
        allow_input_downcast=True)

    ''' Initializations '''
    # INITIALIZE
    # layers to be read from
    loopOver = range(params.nLayers)
    # initializing training values
    currentT2Batch = 0
    # samples, batches per epoch, etc.
    nSamples1 = t1Data.shape[0]
    nVSamples, nTestSamples = [vData.shape[0], testD.shape[0]]
    nBatches1 = nSamples1 / params.batchSize1
    # permutations
    testPerm = range(0, nTestSamples)
    train1Perm = range(0, nSamples1)
    if params.useT2:
        nSamples2 = t2Data.shape[0]
        train2Perm = range(0, nSamples2)
        nBatches2 = nSamples2 / params.batchSize2

    # TRACKING
    # (1) best results
    bestVal = 1.; bestValTst = 1.
    # (2) errors
    tempError1, tempError2, tempCost1, tempCost2 = [[], [], [], []]
    t1Error, t2Error, validError, testError = [[], [], [], []]
    t1Cost, t2Cost, penaltyCost, validCost, testCost = [[], [], [], [], []]
    # (3) activation statistics (per layer)
    trackTemplate = np.empty((0, params.nLayers), dtype=object)
    trackLayers = {}
    for stat in params.activTrack:
        trackLayers[stat] = trackTemplate
    # (4) penalty, noise, activation parametrization (per layer)
    penalList = ['L1', 'L2', 'Lmax', 'LmaxCutoff', 'LmaxSlope', 'LmaxHard']
    noiseList = ['addNoise', 'inputNoise', 'dropOut', 'dropOutB']
    sharedNames = [p.name for p in model.paramsT1] + [p.name for p in model.paramsT2]
    print sharedNames
    trackPenal = {}; trackPenalSTD = {}
    trackNoise = {}; trackNoiseSTD = {}
    trackGrads = {}
    track1stFeatures = []

    trackRglrzTemplate = np.empty((0, len(loopOver)), dtype=object)
    for param in params.rglrz:
        if param in penalList:
            trackPenal[param] = trackRglrzTemplate
            trackPenalSTD[param] = trackRglrzTemplate
        if param in noiseList:
            trackNoise[param] = trackRglrzTemplate
            trackNoiseSTD[param] = trackRglrzTemplate
    # (5) other
    trackLR1, trackLR2 = [[], []]

    params.halfLife = params.halfLife*10000./(params.maxEpoch*nBatches1)
    print 'number of updates total', params.maxEpoch*nBatches1
    print 'number of updates within epoch', nBatches1

    ''' Training!!! '''
    lastUpdate = params.maxEpoch*nBatches1 - 1
    try:
        t_start = time()
        for i in range(0, params.maxEpoch*nBatches1):  # i = nUpdates

            # EPOCHS
            currentEpoch = i / nBatches1
            currentBatch = i % nBatches1  # batch order in the current epoch
            currentProgress = np.around(1.*i/nBatches1, decimals=4)

            ''' Learning rate and momentum schedules. '''
            t = 1.*i/(params.maxEpoch*nBatches1)
            lr1 = np.asarray(params.learnRate1*
                             lr_schedule(fun=params.learnFun1, var=t, halfLife=params.halfLife, start=0),
                             theano.config.floatX)
            lr2 = np.asarray(params.learnRate2*
                             lr_schedule(fun=params.learnFun2, var=t, halfLife=params.halfLife, start=params.triggerT2),
                             theano.config.floatX)
            moment1 = np.asarray(params.momentum1[1] - (params.momentum1[1]-(params.momentum1[0]))*
                                 lr_schedule(fun=params.momentFun, var=t, halfLife=params.halfLife, start=0),
                                 theano.config.floatX)
            moment2 = np.asarray(params.momentum2[1] - (params.momentum2[1]-(params.momentum2[0]))*
                                 lr_schedule(fun=params.momentFun, var=t, halfLife=params.halfLife, start=0),
                                 theano.config.floatX)

            # PERMUTING T1 AND T2 SETS
            if currentBatch == 0:
                np.random.shuffle(train1Perm)
            if params.useT2 and (currentT2Batch == nBatches2 - 1):
                np.random.shuffle(train2Perm)
                currentT2Batch = 0

            ''' Update T1&T2 '''
            # Update both
            if params.useT2:
                # make batches
                sampleIndex1 = train1Perm[(currentBatch * params.batchSize1):
                                          ((currentBatch + 1) * (params.batchSize1))]
                sampleIndex2 = train2Perm[(currentT2Batch * params.batchSize2):
                                          ((currentT2Batch + 1) * (params.batchSize2))]

                if (i % params.T1perT2 == 0) and (i >= params.triggerT2):
                    res = updateT2part1(t2Data[sampleIndex2], t2Label[sampleIndex2], lr1, moment1, 0, 1)
                    (c2, y2, debugs) = (res[0], res[1], res[2:])
                    res = updateT2part2(t1Data[sampleIndex1], t1Label[sampleIndex1], lr1, moment1, lr2, moment2, 1, 0)
                    (c1, y1, debugs) = (res[0], res[1], res[2:])

                    tempError2 += [1.*sum(t2Label[sampleIndex2] != y2) / params.batchSize2]
                    tempCost2 += [c2]
                    currentT2Batch += 1
                    if np.isnan(c1): print 'NANS in part 2!'
                    if np.isnan(c2): print 'NANS in part 1!'
                else:
                    res = updateT1(t1Data[sampleIndex1], t1Label[sampleIndex1], lr1, moment1, 1, 0)
                    (c1, y1, debugs) = (res[0], res[1], res[2:])

                tempError1 += [1.*sum(t1Label[sampleIndex1] != y1) / params.batchSize1]
                tempCost1 += [c1]
                if np.isnan(c1): print 'NANS!'

            # Update T1 only
            else:
                # make batch
                sampleIndex1 = train1Perm[(currentBatch * params.batchSize1):
                                          ((currentBatch + 1) * (params.batchSize1))]
                res = updateT1(t1Data[sampleIndex1], t1Label[sampleIndex1], lr1, moment1, 1, 0)
                (c1, y1, debugs) = (res[0], res[1], res[2:])

                tempError1 += [1.*sum(t1Label[sampleIndex1] != y1) / params.batchSize1]
                tempCost1 += [c1]
                if np.isnan(c1): print 'NANS', c1

            ''' Evaluate test, store results, print status. '''
            if np.around(currentProgress % (1./params.trackPerEpoch), decimals=4) == 0 or i == lastUpdate:

                # batchnorm parameters: estimate for the final model
                if (params.batchNorm and (currentEpoch > 1)) \
                        and ((currentEpoch % params.evaluateTestInterval) == 0 or i == lastUpdate) \
                        and params.testBN != 'lazy':
                    model = update_bn(model, params, evaluateBN, t1Data, t1Label)

                # EVALUATE: validation set
                # allVar = evaluate(vData[:2], vData, vLabel[:2], vLabel, 1)
                # cV, yTest, _, _ = allVar[0], allVar[1], allVar[2], allVar[3], allVar[4:]
                # cV, yTest = allVar[0], allVar[1]
                # tempVError = 1.*sum(yTest != vLabel) / nVSamples
                # tempVError = 7.; cV = 7.

                '''
                EVALUATE: test set
                    - in batches of 1000, ow too large to fit on gpu
                    - using dummy input in place of regularized input stream (Th complains ow)
                    - graph = 1, hence BN constants do not depend on regularized input stream (see batchnorm.py)
                '''
                if params.model == 'mlp':
                    nTempSamples = 5000
                else:
                    nTempSamples = 1000
                tempError = 0.; tempCost = 0.; batchSizeT = nTestSamples / 10
                if currentEpoch < 0.8*params.maxEpoch:
                    np.random.shuffle(testPerm)
                    tempIndex = testPerm[:nTempSamples]
                    cT, yTest, p, stats = evaluate(testD[tempIndex], testL[tempIndex], 0, 1)
                    tempError = 1.*sum(yTest != testL[tempIndex]) / nTempSamples
                else:
                    for j in range(10):
                        tempIndex = testPerm[j*batchSizeT:(j+1)*batchSizeT]
                        cT, yTest, p, stats = evaluate(testD[tempIndex], testL[tempIndex], 0, 1)
                        tempError += 1.*sum(yTest != testL[tempIndex]) / batchSizeT
                        tempCost += cT
                    tempError /= 10.
                    cT = tempCost / 10.

                ''' TRACK: class errors & cost '''
                # note: T1 and T2 errors are averaged over training, hence initially can not be compared to valid and test set
                t1Error += [np.mean(tempError1)]; t1Cost += [np.mean(tempCost1)]
                if params.useT2:
                    t2Error += [np.mean(tempError2)]; t2Cost += [np.mean(tempCost2)]
                testError += [tempError]; testCost += [cT]
                penaltyCost += [p]
                # validError += [tempVError]

                # RESET tracked errors
                tempError1 = []; tempCost1 = []
                tempError2 = []; tempCost2 = []

                ''' TRACK: T2 parameter statistics & learning rates '''
                # monitoring T2 values
                if params.useT2:
                    trackNoise, trackPenal = t2_extract(model, params, trackNoise, trackPenal)
                # monitoring activations
                if params.trackStats:
                    trackLayers = stat_extract(stats, params, trackLayers)
                # monitoring gradients
                if params.trackGrads:
                    trackGrads = grad_extract(debugs, params, sharedNames, trackGrads)
                # monitoring log learning rates
                trackLR1 += [lr1]
                trackLR2 += [lr2]

                ''' STATUS print '''
                if params.useT2 and ((currentEpoch % params.printInterval) == 0 or (i == params.maxEpoch*nBatches1 - 1)):
                    print currentEpoch, ') time=%.f T1 | T2 | test | penalty ' % ((time() - t_start)/60)
                    print 'ERR %.3f | %.3f | %.3f | - ' % (
                        t1Error[-1]*100, t2Error[-1]*100, testError[-1]*100)
                    print 'COSTS %.3f | %.3f | %.3f | %.3f ' % (
                        t1Cost[-1], t2Cost[-1], testCost[-1], penaltyCost[-1])
                    print 'Log[learningRates] ', np.log10(lr1), 'T1 ', np.log10(lr2), 'T2'
                    for param in params.rglrzTrain:
                        if param in penalList:
                            print param, trackPenal[param][-1]
                        if param in noiseList:
                            print param, trackNoise[param][-1]
                if ((currentEpoch % params.printInterval) == 0 or (i == params.maxEpoch*nBatches1 - 1)):
                    print currentEpoch, 'TRAIN %.2f TEST %.2f time %.f' % (
                        t1Error[-1]*100, testError[-1]*100, ((time() - t_start)/60))
                    print 'Est. time till end: ', (((time() - t_start)/60) / (currentEpoch+1))*(params.maxEpoch - currentEpoch)

    except KeyboardInterrupt:
        pass
    time2train = (time() - t_start)/60

    ''' Prepare variables for output. '''
    if params.useT2:
        lastT2 = t2Error[-1]
        allErrors = np.concatenate(([t1Error], [t2Error], [testError]), axis=0)
        allCosts = np.concatenate(([t1Cost], [t2Cost], [testCost], [penaltyCost]), axis=0)

        outParams = {}
        for param in params.rglrz:
            if param in penalList:
                outParams[param] = trackPenal[param][-1]
            if param in noiseList:
                outParams[param] = trackNoise[param][-1]
            else:
                print 'param not tracked, fix!'
    else:
        lastT2 = 0.
        allErrors = np.concatenate(([t1Error], [testError]), axis=0)
        allCosts = np.concatenate(([t1Cost], [testCost]), axis=0)

        outParams = {}
        for param in params.rglrz:
            outParams[param] = params.rglrzInitial[param]

    modelName = 'pics/'
    best = min(testError)
    modelName += str(params.nLayers-1) + 'x' + str(params.model) + '_best:' + str(best) + '.pdf'

    # saved for plot
    data = {
        # 'setup': params,
        'modelName': modelName,
        'best': best,
        'lastEpoch': (currentEpoch+1),
        'paramsTrained': params.rglrzTrain,
        'allErrors': allErrors,
        'allCosts': allCosts,
        'trackLayers': trackLayers,
        'trackPenal': trackPenal,
        'trackNoise': trackNoise,
        'trackFeatures': track1stFeatures,
        'trackPenalSTD': trackPenalSTD,
        'trackNoiseSTD': trackNoiseSTD,
        'trackGrads': trackGrads,
        'trackLR1': trackLR1,
        'trackLR2': trackLR2,
        'outParams': outParams,
    }

    import pickle
    file = open(params.saveName, 'wb')
    pickle.dump(data, file)
    file.close()

    # prepared for return
    results = {'bestVal': bestVal,  # which could be validation or T2
               'bestValTest': best,
               'lastT1': t1Error[-1],
               'lastT2': lastT2,
               'lastVal': None,  # validError[-1],
               'lastTest': testError[-1],
               'outParams': outParams,
               'trackGrads': trackGrads,
               'trackPenal': trackPenal,
               'trackNoise': trackNoise,
               'setup': params,
               'lastCTest': testCost[-1],
               'lastCT1': t1Cost[-1],
               'trainTime': time2train,
               }

    return results