def onStartBtn(self, event):
    """Handle the Start button: save the current settings, then launch
    the gallery rooted at the user-supplied path.

    Args:
        event: wx event object; forwarded unchanged to onSaveSettingsBtn.

    Side effects:
        Persists settings, may show a modal error dialog, and hands
        control to launch.main() when a gallery path is present.
    """
    # Persist settings first so the launcher picks up the latest values.
    self.onSaveSettingsBtn(event)
    basepath = self.gallery_path.GetValue()
    # Falsiness check covers both None and the empty string
    # (original used `basepath == None or basepath == ""`).
    if not basepath:
        wx.MessageDialog(
            self,
            "No gallery path given, cannot proceed without one.",
            "",
            wx.OK,
        ).ShowModal()
    else:
        launch.main(basepath=basepath)
#!/usr/bin/python import os, sys path = "None" sys.path.append(path) import launch os.chdir(path) launch.main()
init initialize a MicroStack node add-compute generate a connection string for a node to join the cluster launch launch a virtual machine ''') parser.add_argument('command', help='A subcommand to run:\n' ' {init, launch, add-compute}') args = parser.parse_args(sys.argv[1:2]) COMMANDS = { 'init': init.main.init, 'add-compute': cluster.add_compute.main, 'launch': launch.main.main } cmd = COMMANDS.get(args.command, None) if cmd is None: parser.print_help() raise Exception('Unrecognized command') # TODO: Implement this properly via subparsers and get rid of # extra modules. sys.argv[0] = sys.argv[1] # Get rid of the command name in the args and call the actual command. del (sys.argv[1]) cmd() if __name__ == '__main__': main()
def run(workout):
    """Generate every available report/chart for one workout directory.

    Each report step is independent and guarded by its own input-file check;
    KeyboardInterrupt in any step skips just that step so the remaining
    reports still run (a deliberate best-effort design, preserved here).

    Args:
        workout: path to the workout results directory to process.

    Side effects:
        Copies the performance.xls template into the workout directory and
        invokes the various project chart/report generators, which write
        their output files there.
    """
    workdir = workout
    name = os.path.basename(workdir)
    # Template charts live in a 'chart' dir next to this script.
    chartdir = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'chart')
    try:
        # BUG FIX: the original guard checked for '(<name>)performace.xls'
        # (typo) while the file written below is '(<name>)performance.xls',
        # so the check never matched and the template was re-copied (and any
        # existing workbook overwritten) on every run.
        target = os.path.join(workdir, '(' + name + ')' + 'performance.xls')
        if not os.path.exists(target):
            wb = open_workbook(os.path.join(chartdir, 'performance.xls'),
                               formatting_info=True)
            rb = copy(wb)
            rb.save(target)
        else:
            print('file already exists')
    except KeyboardInterrupt:
        pass
    try:
        if os.path.exists(os.path.join(workdir, 'scenes')):
            scenesgpuchart.main(workdir)
            gfxinfo.main(workdir, name)
            skinfo.main(workdir, name)
    except KeyboardInterrupt:
        pass
    try:
        if os.path.exists(os.path.join(workdir, 'memory')):
            memory.main(workdir, name)
            memchart.getData(workdir)
    except KeyboardInterrupt:
        pass
    try:
        if os.path.exists(os.path.join(workdir, 'monkey')):
            # A monkey.csv summary supersedes the individual monkey reports.
            if os.path.exists(os.path.join(os.path.join(workdir, 'monkey'), 'monkey.csv')):
                allreader.main(workdir, name)
            else:
                monkeygpuchart.main(workdir)
                monkeymemchart.getData(workdir)
                alonemonkey.main(workdir, name)
                monkeygfx.main(workdir, name)
    except KeyboardInterrupt:
        pass
    try:
        if os.path.exists(os.path.join(workdir, 'stress.csv')):
            stress.main(workdir, name)
    except KeyboardInterrupt:
        pass
    try:
        if os.path.exists(os.path.join(workdir, 'compat.csv')):
            compat.main(workdir, name)
    except KeyboardInterrupt:
        pass
    try:
        if os.path.exists(os.path.join(workdir, 'launch.csv')):
            launch.main(workdir, name)
    except KeyboardInterrupt:
        pass
    try:
        if os.path.exists(os.path.join(workdir, 'uptime.csv')):
            uptime.main(workdir, name)
    except KeyboardInterrupt:
        pass
    try:
        # Always attempt the hyperlink index last, regardless of inputs.
        hypelink.main(workdir, name)
        print('ok')
    except KeyboardInterrupt:
        pass
    print('alltest finish')
raise ValueError(f"Task {sm_args.nlp_problem} is not supported.") return task_path, transformer_args if __name__ == "__main__": # Get initial configuration to select appropriate HuggingFace task and its configuration print('Starting training...') parser = ArgumentParser() parser.add_argument('--nlp-problem', type=str, default="language-modeling", help="Define NLP problem to run from HuggingFace example library. See for options: \ https://github.com/huggingface/transformers/tree/master/examples#the-big-table-of-tasks.") parser.add_argument('--dataset', type=str, default=None, help="Define which dataset to use.") sm_args, transformer_args = parser.parse_known_args() # Get task script and its cofiguration task_script, transformer_args = task_selector(sm_args, transformer_args) # Derive parameters of distributed training cluster in Sagemaker world = get_training_world() # Creates launch configuration according to PyTorch Distributed Launch utility requirements: # https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py launch_config = ["--nnodes", str(world['number_of_machines']), "--node_rank", str(world['machine_rank']), "--nproc_per_node", str(world['number_of_processes']), "--master_addr", world['master_addr'], "--master_port", world['master_port']] # Launch distributed training. Note, that launch script configuration is passed as script arguments sys.argv = [""] + launch_config + [task_script] + transformer_args launch.main()