def run_bench(checkpoint, dataset):
    path_pattern = os.path.join(dataset, "%s.zip")
    model_pattern = "%s.json"
    for train, test, model in find_todos(checkpoint):
        print("Start job. Train: %s, Test: %s, Model: %s" % (train, test, model))
        train_path = path_pattern % train
        if not os.path.isfile(train_path):
            print("Cannot start training. %s doesn't exist." % train_path)
            continue
        test_path = path_pattern % test
        if not os.path.isfile(test_path):
            print("Cannot start testing. %s doesn't exist." % test_path)
            continue
        model_path = model_pattern % model
        if not os.path.isfile(model_path):
            print("Cannot load model. %s doesn't exist." % model_path)
            continue
        with open(model_path, "r") as i:
            config = json.load(i)
        config = adjust_config(config, train_path)
        exc.run(config, train_path, test_path,
                checkpoint=os.path.join(checkpoint, train))
def openbw(model1_data, model2_data, strain1_id, strain2_id, generation_id):
    print("[Model] Writing models")
    write_model(strain1_id, model1_data)
    write_model(strain2_id, model2_data)
    model1_env = {
        "OPENBW_LAN_MODE": "LOCAL",
        "OPENBW_LOCAL_PATH": f"{args.name}_local",
        "BWAPI_CONFIG_AUTO_MENU__CHARACTER_NAME":
            f"{args.name}_{generation_id}_{strain1_id}_{strain2_id}"
    }
    model2_env = {
        "OPENBW_LAN_MODE": "LOCAL",
        "OPENBW_LOCAL_PATH": f"{args.name}_local",
        "BWAPI_CONFIG_AUTO_MENU__CHARACTER_NAME":
            f"{args.name}_{generation_id}_{strain2_id}_{strain1_id}"
    }
    print("[Model] Launching openbw")
    p1 = run(['./BWAPILauncher'], model1_env)
    p2 = run(['./BWAPILauncher'], model2_env)
    try:
        p1.wait(args.timeout)
        p2.wait(args.timeout)
        safe_unlink(os.path.join(CWD, f"{args.name}_local"))
    except:
        safe_unlink(os.path.join(CWD, f"{args.name}_local"))
        p1.kill()
        p2.kill()
        raise Exception("No pants!")
    r1 = load_result(strain1_id)
    r2 = load_result(strain2_id)
    return (r1, r2)
def post(self):
    """
    Upload file to input directory,
    Params-> file (Attach file in client request)
    """
    execute_params = self.get_execute_params()
    run(execute_params)
    return redirect("/report/show")
def parse_and_use_sys_args(args):
    parser = argparse.ArgumentParser(
        description="The Renal Pipeline's runtime execution arguments.")
    parser.add_argument(
        "--runtime_provider",
        required=True,
        help="A string value representing the path to the resource provider.")
    parser.add_argument(
        "--source_directory",
        required=True,
        help="The directory in which the inputs for a single unit of work can be "
             "found across all resources (i.e. the run folder for a sequencing "
             "run). It is expected that all ancillary information is also present "
             "within this folder.")
    parser.add_argument(
        "--scripts_directory",
        default=None,
        help="The directory in which the scripts for the pipeline are located "
             "across all resources. If this value is not provided, it is expected "
             "that all relevant scripts can be found in the current working "
             "directory.")
    parser.add_argument(
        "--pipeline_controller",
        default=None,
        help="The path to the pipeline controller script across all resources. "
             "This script will be provided with the current engine object via the "
             "function execute_pipeline(engine). If the script is not provided, it "
             "is assumed that it will be present within the scripts directory and "
             "called controller.py")
    parser.add_argument(
        "--working_directory",
        default=None,
        help="The path to your working directory across all resources.")
    parser.add_argument(
        "--output_directory",
        default=None,
        help="The path to your output directory across all resources.")
    args = parser.parse_args(args)
    run(runtime_provider=args.runtime_provider,
        source_directory=args.source_directory,
        scripts_directory=args.scripts_directory,
        pipeline_controller=args.pipeline_controller,
        working_directory=args.working_directory,
        output_directory=args.output_directory)
def execute():
    log.info("Audit-1 Clicked")
    if request.method == 'GET':
        return render_template('execute.html', progress="-")
    elif request.method == 'POST':
        log.info("Audit-1 Invoked with files")
        reset_logs()
        files = {}
        for key in request.files:
            file = request.files[key]
            log.info(f"{file.filename}")
            if file and allowed_file(file.filename):
                file.save(
                    os.path.join(app.config['UPLOAD_FOLDER'],
                                 secure_filename(file.filename)))
                # store the saved path; use os.path.join instead of a
                # hard-coded backslash so this also works on non-Windows hosts
                files[file.filename] = os.path.join(
                    app.config['UPLOAD_FOLDER'], file.filename)
                log.info(
                    f'file : {file.filename} saved to {app.config["UPLOAD_FOLDER"]}')
            else:
                flash(f'Allowed file type : {ALLOWED_EXTENSIONS}')
                return redirect(request.url)
        if len(files) == 2:
            filenames = list(files.keys())
            log.info(f'Auditing files : {filenames[0]} & {filenames[1]}')
            output_file = run(files[filenames[0]], files[filenames[1]])
            runtime_log = get_cache()
            return render_template('audit.html',
                                   file_name=output_file['file_name'],
                                   path=output_file['path'],
                                   progress="Done!!",
                                   runtime_log=runtime_log)
        else:
            flash('Same file names')
            return redirect(request.url)
def pemfile(request):
    run_command = 'ctool' \
                  ' --provider %(cloud-option)s' \
                  ' dump_key' \
                  ' %(cluster-id)s'
    run_command = run_command % request
    logger.info('Executing: %s', run_command)
    response = execute.run(run_command)
    return response
def index():
    if request.method == 'POST':
        # extract the submitted code
        Code = request.form['code']
        # write the code to a file
        file(Code)
        # run it
        Code = run()
        # generate the output file
        file2(Code)
        # extract the data from the output file
        data = extract()
        return render_template('index.html', RESULT=data)
    else:
        return render_template('index.html', RESULT="Sin Compilar")
def start(postvars, reservation_id):
    ec2.tag_reservation(reservation_id, 'status',
                        'Starting %(product-name)s...' % postvars)
    start_command = 'ctool' \
                    ' --provider %(cloud-option)s' \
                    ' start' \
                    ' %(full_name)s' \
                    ' %(product-name)s'
    start_command = start_command % postvars
    flash(start_command)
    logger.info('Executing: %s', start_command)
    response = execute.run(start_command)
    if response.stderr:
        return response
def start_opscenter(postvars, reservation_id):
    if postvars['opscenter-install'] == 'yes':
        ec2.tag_reservation(reservation_id, 'status', 'Starting opscenter...')
        start_command = 'ctool' \
                        ' --provider %(cloud-option)s' \
                        ' start' \
                        ' %(full_name)s' \
                        ' opscenter'
        start_command = start_command % postvars
        flash(start_command)
        logger.info('Executing: %s', start_command)
        response = execute.run(start_command)
        if response.stderr:
            return response
def install_opscenter(postvars, reservation_id):
    if postvars['opscenter-install'] == 'yes':
        ec2.tag_reservation(reservation_id, 'status', 'Installing opscenter...')
        install_command = 'ctool' \
                          ' --provider %(cloud-option)s' \
                          ' install' \
                          ' --repo staging' \
                          ' --version_or_branch %(opscenter-version)s' \
                          ' %(full_name)s' \
                          ' opscenter'
        install_command = install_command % postvars
        flash(install_command)
        logger.info('Executing: %s', install_command)
        response = execute.run(install_command)
        if response.stderr:
            return response
def start_agent(postvars, reservation_id):
    if postvars['opscenter-install'] == 'yes':
        ec2.tag_reservation(reservation_id, 'status',
                            'Starting DataStax Agents...')
        start_command = 'ctool' \
                        ' --provider %(cloud-option)s' \
                        ' run' \
                        ' %(full_name)s' \
                        ' all' \
                        ' "sudo service datastax-agent start"'
        start_command = start_command % postvars
        flash(start_command)
        logger.info('Executing: %s', start_command)
        response = execute.run(start_command)
        if response.stderr:
            return response
def launch(postvars):
    # TODO: Use automaton library, not command line
    # currently chose shell commands to teach presales ctool in a more
    # relatable fashion
    launch_command = 'ctool' \
                     ' --log-dir automaton_logs/%(clean_email)s' \
                     ' --log-file %(log_file)s' \
                     ' --provider %(cloud-option)s' \
                     ' launch' \
                     ' --instance-type %(instance-type)s' \
                     ' --platform %(platform)s' \
                     ' --tags \'%(tags)s\'' \
                     ' %(full_name)s' \
                     ' %(num_nodes)s'
    launch_command = launch_command % postvars
    flash(launch_command)
    logger.info('Executing: %s', launch_command)
    response = execute.run(launch_command)
    if response.stderr:
        return response
def set(self):
    '''Trigger all environment checks'''
    # Check for Intel and export INTEL_BATCH
    if 'Intel' in self.glxinfo:
        print ' * Intel detected, exporting: INTEL_BATCH=1'
        os.environ['INTEL_BATCH'] = '1'

    # Check TFP and export LIBGL_ALWAYS_INDIRECT if needed
    if self.tfp != 'direct':
        print ' * No %s with direct rendering context' % tfp
        if self.tfp == 'indirect':
            print ' ... present with indirect rendering, exporting: LIBGL_ALWAYS_INDIRECT=1'
            os.environ['LIBGL_ALWAYS_INDIRECT'] = '1'
        else:
            # If using Xgl with a proprietary driver, exports LD_PRELOAD=<mesa libGL>
            if self.Xgl and self.glx_vendor != 'SGI':
                print ' * Non-mesa driver on Xgl detected'
                from data import mesa_libgl_locations
                location = [l for l in mesa_libgl_locations if os.path.exists(l)]
                if location:
                    print ' ... exporting: LD_PRELOAD=%s' % location[0]
                    os.environ['LD_PRELOAD'] = location[0]
                    if run(['glxinfo'], 'output').count(tfp) >= 3:
                        self.tfp = 'direct'
                else:
                    # kindly let the user know... but don't abort (maybe it will work :> )
                    print ' ... no mesa libGL found for preloading, this may not work!'
            else:
                print ' ... nor with indirect rendering, this isn\'t going to work!'

    # Check for nvidia on Xorg
    if not self.Xgl and self.glx_vendor == 'NVIDIA Corporation':
        print ' * NVIDIA on Xorg detected, exporting: __GL_YIELD=NOTHING'
        os.environ['__GL_YIELD'] = 'NOTHING'
def post(self):
    data = Cmd.parser.parse_args()
    cmd = data['cmd']
    std_path = data['std_path']
    output_path = data['output_path']
    module = data['module']
    activity = {
        'cmd': cmd,
        'std_path': std_path,
        'output_path': output_path,
        'status': 'Running'
    }
    # this
    if activities_log.get(module):
        activities_log[module].append(activity)
    else:
        activities_log[module] = [activity]

    utils.print_info("Execute: {0} ".format(cmd))
    stdout = execute.run(cmd)
    # just ignore for testing purpose
    # stdout = "<< stdoutput >> << {0} >>".format(cmd)
    if std_path != '':
        utils.just_write(std_path, stdout)
    utils.check_output(output_path)

    # change status of log
    # activity['status'] = 'Done'
    for item in activities_log[module]:
        if item['cmd'] == cmd:
            if stdout is None:
                item['status'] = 'Error'
            else:
                item['status'] = 'Done'

    return jsonify(status="200", output_path=output_path)
def set(self):
    '''Trigger all environment checks'''
    # Check for Intel and export INTEL_BATCH
    if 'Intel' in self.glxinfo:
        print ' * Intel detected, exporting: INTEL_BATCH=1'
        os.environ['INTEL_BATCH'] = '1'

    # Check TFP and export LIBGL_ALWAYS_INDIRECT if needed
    if self.tfp != 'direct':
        print ' * No %s with direct rendering context' % tfp
        if self.tfp == 'indirect':
            print ' ... present with indirect rendering, exporting: LIBGL_ALWAYS_INDIRECT=1'
            os.environ['LIBGL_ALWAYS_INDIRECT'] = '1'
        else:
            # If using Xgl with a proprietary driver, exports LD_PRELOAD=<mesa libGL>
            if self.Xgl and self.glx_vendor != 'SGI':
                print ' * Non-mesa driver on Xgl detected'
                from data import mesa_libgl_locations
                location = [l for l in mesa_libgl_locations if os.path.exists(l)]
                if location:
                    print ' ... exporting: LD_PRELOAD=%s' % location[0]
                    os.environ['LD_PRELOAD'] = location[0]
                    if run(['glxinfo'], 'output').count(tfp) >= 3:
                        self.tfp = 'direct'
                else:
                    # kindly let the user know... but don't abort (maybe it will work :> )
                    print ' ... no mesa libGL found for preloading, this may not work!'
            else:
                print ' ... nor with indirect rendering, this isn\'t going to work!'

    # Check for nvidia on Xorg
    if not self.Xgl and self.glx_vendor == 'NVIDIA Corporation':
        print ' * NVIDIA on Xorg detected, exporting: __GL_YIELD=NOTHING'
        os.environ['__GL_YIELD'] = 'NOTHING'
def install(postvars, reservation_id):
    ec2.tag_reservation(reservation_id, 'user', remove=True)
    ec2.tag_reservation(reservation_id, 'status',
                        'Installing %(product-name)s...' % postvars)
    postvars['spark_hadoop'] = '--spark-hadoop' \
        if 'spark-and-hadoop' in postvars else ''
    if len(postvars['advanced_nodes']['cluster']['nodes']) == 0:
        # calculate install values
        postvars['percent_analytics'] = float(postvars['hadoop-nodes']) / \
            postvars['num_nodes']
        postvars['percent_search'] = float(postvars['search-nodes']) / \
            postvars['num_nodes']
        postvars['percent_spark'] = float(postvars['spark-nodes']) / \
            postvars['num_nodes']
        install_command = 'ctool ' \
                          ' --provider %(cloud-option)s' \
                          ' install' \
                          ' --repo staging' \
                          ' --percent-analytics %(percent_analytics)s' \
                          ' --percent-search %(percent_search)s' \
                          ' --percent-spark %(percent_spark)s' \
                          ' %(spark_hadoop)s' \
                          ' --version_or_branch %(dse-version)s' \
                          ' --num-tokens %(num-of-tokens)s' \
                          ' %(full_name)s' \
                          ' %(product-name)s'
        install_command = install_command % postvars
        logger.info('Executing: %s', install_command)
        response = execute.run(install_command)
        flash(install_command)
    else:
        with NamedTemporaryFile() as f:
            postvars['config_file'] = f.name
            f.write(json.dumps(postvars['advanced_nodes'], indent=4,
                               sort_keys=True))
            f.flush()
            install_command = 'ctool' \
                              ' --provider %(cloud-option)s' \
                              ' install' \
                              ' --repo staging' \
                              ' --config-file %(config_file)s' \
                              ' %(spark_hadoop)s' \
                              ' --version_or_branch %(dse-version)s' \
                              ' --num-tokens %(num-of-tokens)s' \
                              ' %(full_name)s' \
                              ' %(product-name)s'
            install_command = install_command % postvars
            logger.info('Executing: %s', install_command)
            logger.debug('With config-file: \n%s', f.read())
            response = execute.run(install_command)
            flash(install_command)
            flash('--config-file: %s' % json.dumps(postvars['advanced_nodes']))
    if response.stderr:
        return response
def post(self, workspace):
    ws_name = utils.get_workspace(workspace=workspace)
    options_path = current_path + \
        '/storages/{0}/options.json'.format(ws_name)
    self.options = utils.reading_json(options_path)

    data = Cmd.parser.parse_args()
    cmd = data['cmd']
    std_path = data['std_path']
    output_path = data['output_path']
    module = data['module']
    nolog = data['nolog']
    activity = {
        'cmd': cmd,
        'std_path': std_path,
        'output_path': output_path,
        'status': 'Running'
    }

    if nolog == 'False':
        activities_path = current_path + '/storages/{0}/activities.json'.format(
            ws_name)
        # activities = utils.reading_json(activities_path)
        activities = utils.reading_json(activities_path)
        if activities.get(module):
            activities[module].append(activity)
        else:
            activities[module] = [activity]
        utils.just_write(activities_path, activities, is_json=True)
        slack.slack_noti('log', self.options, mess={
            'title': "{0} | {1} | Execute".format(
                self.options['TARGET'], module),
            'content': '```{0}```'.format(cmd),
        })

    utils.print_info("Execute: {0} ".format(cmd))
    stdout = execute.run(cmd)
    utils.check_output(output_path)
    # just ignore for testing purpose
    # stdout = "<< stdoutput >> << {0} >>".format(cmd)

    if nolog == 'False':
        # change status of log
        activities = utils.reading_json(activities_path)
        for item in activities[module]:
            if item['cmd'] == cmd:
                if stdout is None:
                    item['status'] = 'Error'
                else:
                    item['status'] = 'Done'
        try:
            if std_path != '':
                utils.just_write(std_path, stdout)
                slack.slack_file('std', self.options, mess={
                    'title': "{0} | {1} | std".format(
                        self.options['TARGET'], module),
                    'filename': '{0}'.format(std_path),
                })
            if output_path != '':
                slack.slack_file('verbose-report', self.options, mess={
                    'channel': self.options['VERBOSE_REPORT_CHANNEL'],
                    'filename': output_path
                })
        except:
            pass
        utils.just_write(activities_path, activities, is_json=True)

    return jsonify(status="200", output_path=output_path)
def launch(self):
    print ' * Launching %s' % self.label
    run(self.command)
def is_running(app):
    'Use pgrep to determine if an app is running'
    if run(['pgrep', app], 'call', quiet=True) == 0:
        return True
def __init__(self, data):
    print ' * Searching for installed applications...'

    ### Compiz Detection
    bins = {}
    for name in ('compiz', 'compiz.real'):
        bin = run(['which', name], 'output')
        if bin:
            bins[name] = bin

    if 'compiz' in bins and 'compiz.real' in bins:
        if bins['compiz'].split(os.sep)[:-1] == bins['compiz.real'].split(os.sep)[:-1]:
            compiz = 'compiz.real'
        else:
            compiz = 'compiz'
    elif 'compiz.real' in bins:
        compiz = 'compiz.real'
    elif 'compiz' in bins:
        compiz = 'compiz'
    else:
        compiz = None

    output = ''
    for name in bins:
        if len(bins) > 1 and name == compiz:
            selected = ' <*>'
        else:
            selected = ''
        output += ' -- %s%s' % (bins[name], selected)

    ### Everything Else
    self.wms = data.wms.copy()
    for wm in data.wms:
        which = run(['which', data.wms[wm][0]], 'output')
        if which:
            output += ' -- %s' % which
        else:
            del self.wms[wm]

    if compiz:
        data.compiz_args.insert(0, compiz)
        self.wms['compiz'] = (compiz, data.compiz_args, 'Compiz', None, None, None)

    self.decorators = data.decorators.copy()
    for decorator in data.decorators:
        which = run(['which', data.decorators[decorator][0]], 'output')
        if which:
            output += ' -- %s' % which
        else:
            del self.decorators[decorator]

    self.apps = data.apps.copy()
    for app in data.apps:
        which = run(['which', data.apps[app][0]], 'output')
        if which:
            output += ' -- %s' % which
        else:
            del self.apps[app]

    if parser_options.verbose:
        print output.rstrip()

    compiz_optionlist = []
    self.options = data.options.copy()
    if compiz:
        compiz_help = run([compiz, '--help'], 'output')
        for item in compiz_help.split():
            item = item[1:].replace(']', '')
            if item.startswith('--'):
                compiz_optionlist.append(item)
    for option in data.options:
        if data.options[option][1] not in compiz_optionlist:
            del self.options[option]
lable = Variable((None, 1), 'lable')
l1 = Dense(data, 2, 'full1', activation='sigmoid')
l2 = Dense(l1, 1, 'full2', activation='tanh')
l2_loss = L2_norm(lable, l2, 'loss')

label_val = np.asarray([[-1], [1], [1], [-1]])
data_val = np.asarray([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
])

for i in xrange(100):
    execute.run([l2_loss], feed_dict={data: data_val, lable: label_val})
    l2_loss.backward()
    l2_loss.update()

print execute.run([l2], feed_dict={data: data_val})

lable = Variable((None, 1), 'lable')
l1 = Dense(data, 2, 'full1', activation='sigmoid')
l2 = Dense(l1, 1, 'full2', activation='sigmoid')
l2_loss = L2_norm(lable, l2, 'loss')

label_val = np.asarray([[0], [1], [1], [0]])
data_val = np.asarray([
    [0, 0],
def start(self):
    'Start the active window manager'
    self.__set_old()

    if self.active == 'compiz' and self.old and self[self.old].killcmd:
        if run(['which', self[self.old].killcmd[0]], 'call', quiet=True) == 0:
            run(self[self.old].killcmd, 'call')
            time.sleep(1)

    if self.active and self.old and 'noreplace' in self[self.active].flags:
        run(['killall', self[self.old].base], 'call')
        time.sleep(1)

    if self.active == 'compiz':
        # use a copy, not the original
        compiz_command = self['compiz'].command[:]
        for option in options:
            if options[option].enabled:
                if options[option].switch is not None:
                    compiz_command.append(options[option].switch)

        kill_list = ['killall']
        for decorator in decorators:
            kill_list.append(decorators[decorator].base)
        run(kill_list, 'call')
        time.sleep(0.5)

        # do it
        print ' ... executing:', ' '.join(compiz_command)
        run(compiz_command, quiet=False)

        if "reload mate panel" in options and options["reload mate panel"].enabled and psutil is not None:
            # Reload mate-panel, if requested and running
            pname = lambda p: p.name if type(p.name) == str else p.name()  # for psutil <=1.2
            if [p for p in psutil.process_iter() if pname(p) == "mate-panel"]:
                print " * Reloading mate-panel"
                run(["mate-panel", "--replace"], "spawn", True)
            else:
                print " * mate-panel reload is enabled, but panel is not running"

    elif self.active:
        run(self[self.active].command)

    else:
        print ' * No active WM set; not going to do anything.'
def __init__(self):
    '''desktop: current desktop environment used to choose interface, fallback wm, and default decorator
    failsafe: boolean, True if in a failsafe session, currently only supports gnome failsafe mode.
    glxinfo: output of glxinfo command
    indirect_glxinfo: output of glxinfo with LIBGL_ALWAYS_INDIRECT
    xvinfo: output of xvinfo
    glx_vendor: 'client glx vendor:' usually one of SGI (for mesa-based drivers), NVIDIA Corporation, or ATI.
    tfp: 'direct' if texture_from_pixmap is present with direct rendering (implying presence with indirect as well), 'indirect' if only present with indirect context, False if not present at all
    Xgl: True in Xgl'''

    # Check gnome- and kde-specific vars, then try generic 'DESKTOP_SESSION'
    if GDSID in os.environ:
        self.desktop = 'gnome'
    elif 'KDE_FULL_SESSION' in os.environ:
        self.desktop = 'kde'
    else:
        self.desktop = os.environ.get('DESKTOP_SESSION', 'unknown')

    self.failsafe = False
    if self.desktop == 'gnome' and GDSID in os.environ and os.environ[GDSID] == 'failsafe':
        self.failsafe = True

    if self.failsafe:
        failsafe_str = 'failsafe '
    else:
        failsafe_str = ''

    # hack to eliminate inconsistency
    if self.desktop == 'kde4':
        self.desktop = 'kde'

    print ' * Detected Session: %s%s' % (failsafe_str, self.desktop)

    ## Save the output of glxinfo and xvinfo for later use:
    # don't try to run glxinfo unless it's installed
    if run(['which', 'glxinfo'], 'call', quiet=True) == 0:
        self.glxinfo = run('glxinfo', 'output')
    else:
        raise SystemExit, ' * Error: glxinfo not installed!'

    # make a temp environment
    indirect_environ = os.environ.copy()
    indirect_environ['LIBGL_ALWAYS_INDIRECT'] = '1'
    self.indirect_glxinfo = run('glxinfo', 'output', env=indirect_environ)

    if run(['which', 'xvinfo'], 'call', quiet=True) == 0:
        self.xvinfo = run('xvinfo', 'output')
    else:
        raise SystemExit, ' * Error: xvinfo not installed!'

    self.glx_vendor = None
    line = [l for l in self.glxinfo.splitlines() if 'client glx vendor string:' in l]
    if line:
        self.glx_vendor = ' '.join(line[0].split()[4:])

    ## Texture From Pixmap / Indirect
    self.tfp = False
    if self.glxinfo.count(tfp) < 3:
        if self.indirect_glxinfo.count(tfp) == 3:
            self.tfp = 'indirect'
    else:
        self.tfp = 'direct'

    ## Xgl
    if 'Xgl' in self.xvinfo:
        self.Xgl = True
    else:
        self.Xgl = False
def overmind(args):
    return run(['./Overmind'] + args)
def main(cargs):
    # folder from where dude is called
    cfolder = os.getcwd()

    # parse command line
    (options, cargs) = parser.parse_args(cargs)

    # check if a command has been given
    if cargs == []:
        parser.print_help()
        sys.exit()

    # create requires no Dudefile, so we deal with it right here
    if cargs[0] == "create":
        if len(cargs) < 2:
            expgen.create()
        else:
            expgen.create(cargs[1])
        sys.exit(0)

    # all other commands require a Dudefile, so we first load it (in "cfg")
    cfg = None

    # use a given dudefile in options
    if options.expfile != None:
        try:
            cfg = imp.load_source('', options.expfile)
        except IOError:
            print >> sys.stderr, 'ERROR: Loading', options.expfile, 'failed'
            parser.print_help()
            sys.exit(1)
    else:
        # try default file names
        current = os.getcwd()
        max_folder = 10  # arbitrary number of parent directories
        i = 0
        while i < max_folder:
            for f in ['desc.py', 'dudefile', 'Dudefile', 'dudefile.py']:
                try:
                    if os.path.exists(f) and i > 0:
                        print "Opening Dudefile: ", os.path.abspath(f)
                    cfg = imp.load_source('', f)
                    break
                except IOError:
                    pass
            if cfg != None:
                break
            else:
                i += 1
                parent, last = os.path.split(current)
                os.chdir(parent)
                current = parent

    if cfg == None:
        print >> sys.stderr, 'ERROR: no dudefile found'
        parser.print_help()
        sys.exit(1)

    # add to actual folder as root in cfg
    cfg.root = os.getcwd()

    # check if cfg can be used for core functions
    core.check_cfg(cfg)

    # check if cfg can be used for summaries
    summary.check_cfg(cfg)

    # parse arguments to module
    if options.margs:
        margs = args.parse(";".join(options.margs))
        print "Passing arguments:", margs
        args.set_args(cfg, margs)

    if hasattr(cfg, 'dude_version') and cfg.dude_version >= 3:
        dimensions.update(cfg)

    # collect filters
    filters = []
    if options.filter and options.filter != []:
        for fi in options.filter:
            for f in fi.split(','):
                filters.append(cfg.filters[f])

    if options.filter_inline and options.filter_inline != []:
        filters += filt.create_inline_filter(cfg, options.filter_inline)

    if options.filter_path:
        current = os.getcwd()
        if current != cfolder:
            # this assumes Dudefile is in the root of the experiment folder
            os.chdir(cfolder)
            path = os.path.abspath(options.filter_path)
            os.chdir(current)
            path = os.path.relpath(path)  # get raw_output_dir/exp_... format
        else:
            path = options.filter_path
        filters += filt.filter_path(cfg, path)

    # get experiments
    experiments = core.get_experiments(cfg)

    # select the set of experiments to be considered (successful,
    # failed or pending)
    if (options.success and options.failed and options.pending) or \
            not (options.success or options.failed or options.pending):
        pass
    else:
        failed, pending = core.get_failed_pending_exp(cfg, experiments)
        expin = []
        expout = []

        if options.failed:
            expin += failed
        else:
            expout += failed

        if options.pending:
            expin += pending
        else:
            expout += pending

        if options.success:
            experiments = [exp for exp in experiments if exp not in expout]
        else:
            experiments = expin

    # apply filters
    if filters != []:
        experiments = filt.filter_experiments(cfg, filters,
                                              options.invert, False, experiments)

    cmd = cargs[0]
    if cmd == 'run':
        if options.force:
            clean.clean_experiments(cfg, experiments)
        execute.run(cfg, experiments, options)
    elif cmd == 'run-once':
        assert len(experiments) == 1
        optpt = experiments[0]
        folder = "once"
        utils.checkFolder(folder)  # create if necessary
        if options.force:
            clean.clean_experiment(folder)
        execute.execute_isolated(cfg, optpt, folder, options.show_output)
    elif cmd == 'sum':
        summary.summarize(cfg, experiments, cargs[1:],
                          options.backend, options.ignore_status)
    elif cmd == 'list':
        for experiment in experiments:
            if options.dict:
                print "experiment:", experiment
            else:
                print core.get_folder(cfg, experiment)
    elif cmd == 'failed':
        failed = core.get_failed(cfg, experiments, False)
        for ffile in failed:
            print ffile
    elif cmd == 'missing':
        failed = core.get_failed(cfg, experiments, True)
        for exp in failed:
            print exp
    elif cmd == 'clean':
        if options.invalid:
            clean.clean_invalid_experiments(cfg, experiments)
        else:
            # TODO if no filter applied, ask if that's really what the
            # user wants.
            r = 'y'
            if options.filter == None and \
                    options.filter_inline == None:
                print "sure to wanna delete everything? [y/N]"
                r = utils.getch()  # raw_input("Skip, quit, or continue? [s/q/c]")
            if r == 'y':
                clean.clean_experiments(cfg, experiments)
    elif cmd == 'visit':
        if len(cargs) < 2:
            print "Specify a bash command after visit"
            sys.exit(1)
        elif len(cargs) > 2:
            print "Surround multi-term bash commands with \"\"."
            print "e.g., \"%s\"" % ' '.join(cargs[1:])
            sys.exit(1)
        visit.visit_cmd_experiments(cfg, experiments, cargs[1])
    elif cmd == 'info':
        info.show_info(cfg, experiments)
    elif cmd == 'status':
        info.print_status(cfg, experiments)
    else:
        print >> sys.stderr, "ERROR: wrong command. %s" % cargs[0]
        parser.print_help()
def post(self):
    data = Cmd.parser.parse_args()
    cmd = data['cmd']
    std_path = data['std_path']
    output_path = data['output_path']
    module = data['module']
    nolog = data['nolog']
    activity = {
        'cmd': cmd,
        'std_path': std_path,
        'output_path': output_path,
        'status': 'Running'
    }

    if nolog == 'False':
        activities = utils.reading_json(current_path + '/storages/activities.json')
        if activities.get(module):
            activities[module].append(activity)
        else:
            activities[module] = [activity]
        utils.just_write(current_path + '/storages/activities.json',
                         activities, is_json=True)
        slack.slack_noti('log', self.options, mess={
            'title': "{0} | {1} | Execute".format(self.options['TARGET'], module),
            'content': '```{0}```'.format(cmd),
        })

    utils.print_info("Execute: {0} ".format(cmd))
    stdout = execute.run(cmd)
    # just ignore for testing purpose
    # stdout = "<< stdoutput >> << {0} >>".format(cmd)
    utils.check_output(output_path)

    if nolog == 'False':
        # change status of log
        activities = utils.reading_json(current_path + '/storages/activities.json')
        for item in activities[module]:
            if item['cmd'] == cmd:
                if stdout is None:
                    item['status'] = 'Error'
                else:
                    item['status'] = 'Done'
        try:
            if std_path != '':
                utils.just_write(std_path, stdout)
                slack.slack_file('std', self.options, mess={
                    'title': "{0} | {1} | std".format(self.options['TARGET'], module),
                    'filename': '{0}'.format(std_path),
                })
            if output_path != '':
                slack.slack_file('verbose-report', self.options, mess={
                    'channel': self.options['VERBOSE_REPORT_CHANNEL'],
                    'filename': output_path
                })
        except:
            pass
        utils.just_write(current_path + '/storages/activities.json',
                         activities, is_json=True)

    return jsonify(status="200", output_path=output_path)
def start(self):
    'Start the active window manager'
    self.__set_old()

    if self.active == 'compiz' and self.old and self[self.old].killcmd:
        if run(['which', self[self.old].killcmd[0]], 'call', quiet=True) == 0:
            run(self[self.old].killcmd, 'call')
            time.sleep(1)

    if self.active and self.old and 'noreplace' in self[self.active].flags:
        run(['killall', self[self.old].base], 'call')
        time.sleep(1)

    if self.active == 'compiz':
        # use a copy, not the original
        compiz_command = self['compiz'].command[:]
        for option in options:
            if options[option].enabled:
                if options[option].switch is not None:
                    compiz_command.append(options[option].switch)

        kill_list = ['killall']
        for decorator in decorators:
            kill_list.append(decorators[decorator].base)
        run(kill_list, 'call')
        time.sleep(0.5)

        # do it
        print ' ... executing:', ' '.join(compiz_command)
        run(compiz_command, quiet=False)

        if "reload mate panel" in options and options["reload mate panel"].enabled and psutil is not None:
            # Reload mate-panel, if requested and running
            pname = lambda p: p.name if type(p.name) == str else p.name()  # for psutil <=1.2
            if [p for p in psutil.process_iter() if pname(p) == "mate-panel"]:
                print " * Reloading mate-panel"
                run(["mate-panel", "--replace"], "spawn", True)
            else:
                print " * mate-panel reload is enabled, but panel is not running"

    elif self.active:
        run(self[self.active].command)

    else:
        print ' * No active WM set; not going to do anything.'
def kill_others(self):
    killall = ['killall']
    for decorator in [x for x in self.decorators if x != self.name]:
        killall.append(self.decorators[decorator].base)
    run(killall, 'call')
def __init__(self):
    '''desktop: current desktop environment used to choose interface, fallback wm, and default decorator
    failsafe: boolean, True if in a failsafe session, currently only supports gnome failsafe mode.
    glxinfo: output of glxinfo command
    indirect_glxinfo: output of glxinfo with LIBGL_ALWAYS_INDIRECT
    xvinfo: output of xvinfo
    glx_vendor: 'client glx vendor:' usually one of SGI (for mesa-based drivers), NVIDIA Corporation, or ATI.
    tfp: 'direct' if texture_from_pixmap is present with direct rendering (implying presence with indirect as well), 'indirect' if only present with indirect context, False if not present at all
    Xgl: True in Xgl'''

    # Check gnome- and kde-specific vars, then try generic 'DESKTOP_SESSION'
    if GDSID in os.environ:
        self.desktop = 'gnome'
    elif 'KDE_FULL_SESSION' in os.environ:
        self.desktop = 'kde'
    else:
        self.desktop = os.environ.get('DESKTOP_SESSION', 'unknown')

    self.failsafe = False
    if self.desktop == 'gnome' and GDSID in os.environ and os.environ[GDSID] == 'failsafe':
        self.failsafe = True

    if self.failsafe:
        failsafe_str = 'failsafe '
    else:
        failsafe_str = ''

    # hack to eliminate inconsistency
    if self.desktop == 'kde4':
        self.desktop = 'kde'

    print ' * Detected Session: %s%s' % (failsafe_str, self.desktop)

    ## Save the output of glxinfo and xvinfo for later use:
    # don't try to run glxinfo unless it's installed
    if run(['which', 'glxinfo'], 'call', quiet=True) == 0:
        self.glxinfo = run('glxinfo', 'output')
    else:
        raise SystemExit, ' * Error: glxinfo not installed!'

    # make a temp environment
    indirect_environ = os.environ.copy()
    indirect_environ['LIBGL_ALWAYS_INDIRECT'] = '1'
    self.indirect_glxinfo = run('glxinfo', 'output', env=indirect_environ)

    if run(['which', 'xvinfo'], 'call', quiet=True) == 0:
        self.xvinfo = run('xvinfo', 'output')
    else:
        raise SystemExit, ' * Error: xvinfo not installed!'

    self.glx_vendor = None
    line = [l for l in self.glxinfo.splitlines() if 'client glx vendor string:' in l]
    if line:
        self.glx_vendor = ' '.join(line[0].split()[4:])

    ## Texture From Pixmap / Indirect
    self.tfp = False
    if self.glxinfo.count(tfp) < 3:
        if self.indirect_glxinfo.count(tfp) == 3:
            self.tfp = 'indirect'
    else:
        self.tfp = 'direct'

    ## Xgl
    if 'Xgl' in self.xvinfo:
        self.Xgl = True
    else:
        self.Xgl = False
def start(self):
    'Start the active window manager'
    self.__set_old()

    if self.active == 'compiz' and self.old and self[self.old].killcmd:
        if run(['which', self[self.old].killcmd[0]], 'call', quiet=True) == 0:
            run(self[self.old].killcmd, 'call')
            time.sleep(1)

    if self.active and self.old and 'noreplace' in self[self.active].flags:
        run(['killall', self[self.old].base], 'call')
        time.sleep(1)

    if self.active == 'compiz':
        # use a copy, not the original
        compiz_command = self['compiz'].command[:]
        for option in options:
            if options[option].enabled:
                compiz_command.append(options[option].switch)

        kill_list = ['killall']
        for decorator in decorators:
            kill_list.append(decorators[decorator].base)
        run(kill_list, 'call')
        time.sleep(0.5)

        # do it
        print ' ... executing:', ' '.join(compiz_command)
        run(compiz_command, quiet=False)

    elif self.active:
        run(self[self.active].command)

    else:
        print ' * No active WM set; not going to do anything.'
def launcher(motor):
    get_linefollow()
    get_dist()
    import execute
    reload(execute)
    execute.run(ns.lf, 127, 127, 127, ns.ds, motor)
def launch(self):
    print ' * Launching %s' % self.label
    run(self.command)
def __init__(self, data):
    print ' * Searching for installed applications...'

    ### Compiz Detection
    bins = {}
    for name in ('compiz', 'compiz.real'):
        bin = run(['which', name], 'output')
        if bin:
            bins[name] = bin

    if 'compiz' in bins and 'compiz.real' in bins:
        if bins['compiz'].split(os.sep)[:-1] == bins['compiz.real'].split(os.sep)[:-1]:
            compiz = 'compiz.real'
        else:
            compiz = 'compiz'
    elif 'compiz.real' in bins:
        compiz = 'compiz.real'
    elif 'compiz' in bins:
        compiz = 'compiz'
    else:
        compiz = None

    output = ''
    for name in bins:
        if len(bins) > 1 and name == compiz:
            selected = ' <*>'
        else:
            selected = ''
        output += ' -- %s%s' % (bins[name], selected)

    ### mate-panel
    which = run(['which', 'mate-panel'], 'output')
    if which:
        output += ' -- %s' % which
    else:
        del data.options['reload mate panel']

    ### Everything Else
    self.wms = data.wms.copy()
    for wm in data.wms:
        which = run(['which', data.wms[wm][0]], 'output')
        if which:
            output += ' -- %s' % which
        else:
            del self.wms[wm]

    if compiz:
        data.compiz_args.insert(0, compiz)
        self.wms['compiz'] = (compiz, data.compiz_args, 'Compiz', None, None, None)

    self.decorators = data.decorators.copy()
    for decorator in data.decorators:
        which = run(['which', data.decorators[decorator][0]], 'output')
        if which:
            output += ' -- %s' % which
        else:
            del self.decorators[decorator]

    self.apps = data.apps.copy()
    for app in data.apps:
        which = run(['which', data.apps[app][0]], 'output')
        if which:
            output += ' -- %s' % which
        else:
            del self.apps[app]

    if parser_options.verbose:
        print output.rstrip()

    compiz_optionlist = []
    self.options = data.options.copy()
    if compiz:
        compiz_help = run([compiz, '--help'], 'output')
        for item in compiz_help.split():
            item = item.strip(" \r\n\t")
            if item.startswith('--'):
                compiz_optionlist.append(item)
    for option in data.options:
        if data.options[option][1] not in compiz_optionlist and data.options[option][1] is not None:
            del self.options[option]
def git(repo_path, command, *args):
    """
    Execute a git command and return the result as a string
    """
    return execute.run("git", "-C", repo_path, command, *args)
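# A minimal usage sketch for the git() helper above. The repository path and
# subcommand are illustrative placeholders; the only assumption is what the
# docstring states, namely that the command's output comes back as a string.
def current_branch(repo_path):
    # e.g. returns "main" for a repository checked out on its default branch
    return git(repo_path, "rev-parse", "--abbrev-ref", "HEAD").strip()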
def install(postvars, reservation_id):
    ec2.tag_reservation(reservation_id, 'user', remove=True)
    ec2.tag_reservation(reservation_id, 'status',
                        'Installing %(product-name)s...' % postvars)
    postvars['spark_hadoop'] = '--spark-hadoop' \
        if 'spark-and-hadoop' in postvars else ''
    if len(postvars['advanced_nodes']['cluster']['nodes']) == 0:
        # calculate install values
        postvars['percent_analytics'] = float(postvars['hadoop-nodes']) / \
            postvars['num_nodes']
        postvars['percent_search'] = float(postvars['search-nodes']) / \
            postvars['num_nodes']
        postvars['percent_spark'] = float(postvars['spark-nodes']) / \
            postvars['num_nodes']
        install_command = 'ctool ' \
                          ' --provider %(cloud-option)s' \
                          ' install' \
                          ' --repo staging' \
                          ' --percent-analytics %(percent_analytics)s' \
                          ' --percent-search %(percent_search)s' \
                          ' --percent-spark %(percent_spark)s' \
                          ' %(spark_hadoop)s' \
                          ' --version_or_branch %(dse-version)s' \
                          ' --num-tokens %(num-of-tokens)s' \
                          ' %(full_name)s' \
                          ' %(product-name)s'
        install_command = install_command % postvars
        logger.info('Executing: %s', install_command)
        response = execute.run(install_command)
        flash(install_command)
    else:
        with NamedTemporaryFile() as f:
            postvars['config_file'] = f.name
            f.write(json.dumps(postvars['advanced_nodes'], indent=4,
                               sort_keys=True))
            f.flush()
            install_command = 'ctool' \
                              ' --provider %(cloud-option)s' \
                              ' install' \
                              ' --repo staging' \
                              ' --config-file %(config_file)s' \
                              ' %(spark_hadoop)s' \
                              ' --version_or_branch %(dse-version)s' \
                              ' --num-tokens %(num-of-tokens)s' \
                              ' %(full_name)s' \
                              ' %(product-name)s'
            install_command = install_command % postvars
            logger.info('Executing: %s', install_command)
            logger.debug('With config-file: \n%s', f.read())
            response = execute.run(install_command)
            flash(install_command)
            flash('--config-file: %s' % json.dumps(postvars['advanced_nodes']))
    if response.stderr:
        return response