def __call__(self, cmd, wrap_args=True):
    """Executes a command on the remote.

    Parameters
    ----------
    cmd: list or str
        command to run on the remote

    Returns
    -------
    tuple of str
        stdout, stderr of the command run.
    """
    # TODO: Do we need to check for the connection to be open or just rely
    # on possible ssh failing?
    cmd_list = cmd if isinstance(cmd, list) \
        else sh_split(cmd, posix=not on_windows)
    # windows check currently not needed, but keep it as a reminder
    # The safest bet while dealing with any special characters is to wrap
    # each argument in "" while escaping any " present inside. For `, &
    # and other symbols used by the shell, it is yet to be figured out how
    # to escape them reliably.
    if wrap_args:
        cmd_list = list(map(_wrap_str, cmd_list))
    ssh_cmd = ["ssh"] + self.ctrl_options + [self.host] + cmd_list
    # TODO: pass expect parameters from above?
    # Hard to explain to toplevel users ... so for now, just set True
    return self.runner.run(ssh_cmd, expect_fail=True, expect_stderr=True)

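# A minimal usage sketch for the ssh wrapper above, assuming a hypothetical
# instance `remote` with `host`, `ctrl_options` and `runner` configured:
# out, err = remote("ls -l /tmp")            # a str is shlex-split first
# out, err = remote(["ls", "-l", "/tmp"])    # a list is passed through as-is
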
def run_command(self, chans, name, match, direct, reply):
    cmd = match.group(1).lower()
    if cmd not in self.commands:
        return
    cmd, filt, error = self.commands[cmd]
    args = match.group(2)
    if not args:
        args = ""
    try:
        args = map(sh_quote, sh_split(args))
        args = " " + " ".join(args)
    except ValueError:
        self.log_error('invalid arguments for "%s"' % (match.group(1),))
        reply(error)
        return
    try:
        output = check_output(cmd + args, shell=True)
        output = filt(output)
        reply(output)
    except subprocess.CalledProcessError:
        self.log_error('could not run command "%s"' % (match.group(1),))
        reply(error)

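# For illustration: the sh_quote/sh_split round-trip above is what keeps
# user-supplied arguments from being interpreted by the shell even though
# check_output() runs with shell=True:
# args = map(sh_quote, sh_split('foo "bar; rm -rf /"'))
# " " + " ".join(args)   ->   " foo 'bar; rm -rf /'"
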
def get_stats(wild, root_dir='.', fmt='%n', sep='?'):
    # raw string avoids the invalid '\;' escape-sequence warning
    argstring = ("find ./ -name '" + wild + "' -exec stat -c '"
                 + fmt + "' '{}' " + r"\;")
    res = run(sh_split(argstring), cwd=root_dir, stdout=PIPE,
              universal_newlines=True)
    return __parse(res.stdout, sep=sep)

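# For illustration, get_stats('*.py', fmt='%n?%s') builds and runs:
#   find ./ -name '*.py' -exec stat -c '%n?%s' '{}' \;
# i.e. one 'name?size' line per match, which __parse presumably splits
# on `sep`.
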
def run(self):
    command = sh_split(' '.join(self.arguments[0:]))
    stdout = Popen(command, stdout=PIPE,
                   stdin=open(os.devnull)).communicate()[0]
    node = nodes.section()
    node.document = self.state.document
    nested_parse_with_titles(self.state, ViewList(stdout.splitlines()), node)
    return node.children

def _next(self):
    conf = self.conf
    while True:  # allow several failures
        # read one line
        is_end = False
        if self.streamer is None:  # read from input
            try:
                line = input(">> ")
                if line.strip() == "":
                    continue
            except EOFError:
                line, is_end = None, True
                break
            except KeyboardInterrupt:
                continue
        else:
            line, is_end = self.streamer.next_and_check()
        if is_end:
            return self.eos
        else:
            target, args, kwargs = None, [], {}
            cmd = line.strip()
            # find target
            if conf.assign_target:
                tmp_fields = cmd.split(conf.target_sep, 1)
                if len(tmp_fields) == 2:
                    tmp_target, remainings = [x.strip() for x in tmp_fields]
                    if str.isidentifier(tmp_target):  # make sure it is an identifier
                        target = tmp_target  # assign target
                        line = remainings
            # split into args
            try:  # try shell splitting
                tmp_args = sh_split(line)
                cur_i = 0
                # collect *args
                while cur_i < len(tmp_args):
                    cur_a = tmp_args[cur_i]
                    if cur_a == conf.kwargs_sep:
                        cur_i += 1  # skip this one!
                        break
                    else:
                        args.append(cur_a)
                        cur_i += 1
                # collect **kwargs
                while cur_i < len(tmp_args):
                    _k, _v = tmp_args[cur_i].split(conf.kwargs_kv_sep)
                    kwargs[_k] = _v
                    cur_i += 1
                return (cmd, target, args, kwargs)
            except:
                zlog(f"Err in CMD-Parsing: {traceback.format_exc()}")

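# A hedged illustration of the parse above (the separator values are
# assumptions; they live in `conf`): with target_sep='=', kwargs_sep='--'
# and kwargs_kv_sep='=', the input
#   >> out = run fast -- mode=quick
# would yield target='out', args=['run', 'fast'], kwargs={'mode': 'quick'}.
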
def get_grep(wild, root_dir='.', grep_options='-H',
             grep_string='object ', sep=':'):
    # raw string avoids the invalid '\;' escape-sequence warning
    argstring = ("find ./ -name '" + wild + "' -exec grep " + grep_options
                 + " '" + grep_string + "' '{}' " + r"\;")
    res = run(sh_split(argstring), cwd=root_dir, stdout=PIPE,
              universal_newlines=True)
    return __parse(res.stdout, sep=sep)

def parse_arguments(args=None):
    """ Parses commands from the HTML POST form. """
    parser = ArgumentParser()
    parser.add_argument("command", type=str)
    if "-i" in args or "--id" in args:
        parser.add_argument("-i", "--id", type=str, required=False,
                            dest="_id", default=None)
    if "-n" in args or "--name" in args:
        parser.add_argument("-n", "--name", type=str, required=False,
                            dest="name", default=None)
    if "-desc" in args or "--description" in args:
        parser.add_argument("-desc", "--description", type=str,
                            required=False, dest="description", default=None)
    if "-dc" in args or "--date_created" in args:
        parser.add_argument("-dc", "--date_created", type=str,
                            required=False, dest="date_created", default=None)
    if "-dd" in args or "--date_deadline" in args:
        parser.add_argument("-dd", "--date_deadline", type=str,
                            required=False, dest="date_deadline", default=None)
    if "-t" in args or "--tags" in args:
        parser.add_argument("-t", "--tags", nargs="*", required=False,
                            dest="tags")
    if isinstance(args, str):
        args = sh_split(args)
    args = vars(parser.parse_args(args))
    command = args.pop("command")
    return command, args

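# A minimal usage sketch (assuming the form posts a shell-like string):
# command, options = parse_arguments('add -n "weekly report" -t work urgent')
# -> command == 'add'
# -> options == {'name': 'weekly report', 'tags': ['work', 'urgent']}
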
def get_cmd(self):
    target, args = None, []
    line = input(">> ")
    # first check assignment target
    cmd = line.strip()
    tmp_fields = cmd.split("=", 1)
    if len(tmp_fields) > 1:
        tmp_target, remainings = [x.strip() for x in tmp_fields]
        if str.isidentifier(tmp_target):
            target = tmp_target
            cmd = remainings
    # then split into args
    args = sh_split(cmd)
    return target, args, line

def parse_command(cls, bot_query: BotQuery) -> BotCommand:
    if bot_query.has_callback_query:
        # Parses the command at the end of the callback payload by design,
        # e.g. "Payload_data/trivia" will be parsed as a trivia command
        cmd_name = bot_query.callback_query.callback_data.rsplit('/')[-1].lower()
        if cmd_name in AVAILABLE_COMMANDS:
            return AVAILABLE_COMMANDS[cmd_name](bot_query)
        return InvalidCommand(bot_query)
    args = sh_split(str(bot_query.message))
    cmd_arg = args[0].lower() if len(args) > 0 else ""
    cmd_name = cmd_arg[1:] if len(cmd_arg) > 0 else ""
    if cmd_arg.startswith("/") and cmd_name in AVAILABLE_COMMANDS:
        return AVAILABLE_COMMANDS[cmd_name](bot_query, args)
    return InvalidCommand(bot_query, args)

def do_create(self, args):
    """Create an object of any class"""
    l_args = sh_split(args)
    class_name = l_args[0] if l_args else ""  # guard against empty input
    if not class_name:
        print("** class name missing **")
        return
    elif class_name not in HBNBCommand.classes:
        print("** class doesn't exist **")
        return
    d = {}
    for element in l_args[1:]:
        # separating key and value for kwargs
        key_v = element.split("=")
        if HBNBCommand.types.get(key_v[0], "not found") != "not found":
            d[key_v[0]] = HBNBCommand.types[key_v[0]](
                key_v[1].replace('"', '').replace("_", ' '))
    new_instance = HBNBCommand.classes[class_name]()
    new_instance.__dict__.update(**d)
    storage.new(new_instance)
    storage.save()
    print(new_instance.id)

def main(*args):
    conf = MainConf()
    conf.update_from_args(args)
    # --
    if conf.load_pkl:
        collection = default_pickle_serializer.from_file(conf.load_pkl)
    else:
        reader = FrameReader()
        collection = reader.read_all(conf.dir, conf.onto)
    if conf.save_pkl:
        default_pickle_serializer.to_file(collection, conf.save_pkl)
    if conf.save_txt:
        with zopen(conf.save_txt, 'w') as fd:
            for f in collection.frames:
                fd.write("#--\n" + f.to_string() + "\n")
    # --
    if conf.debug:
        breakpoint()
    if conf.query:
        map_frame = {f.name: f for f in collection.frames}
        map_lu = ZFrameCollectionHelper.build_lu_map(
            collection, split_lu={"pb": "_", "fn": None}[conf.onto])
        map_role = ZFrameCollectionHelper.build_role_map(collection)
        while True:
            line = input(">> ")
            fields = sh_split(line.strip())
            if len(fields) == 0:
                continue
            try:
                query0, query1 = fields
                _map = {'frame': map_frame, 'lu': map_lu,
                        'role': map_role}[query0]
                answer = _map.get(query1, None)
                if isinstance(answer, ZFrame):
                    zlog(answer.to_string())
                else:
                    zlog(answer)
            except:
                zlog(f"Wrong cmd: {fields}")
                pass

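# The query loop above expects two shell-split fields: a map selector and
# a key, e.g. ">> frame <frame-name>", ">> lu <lu-name>" or
# ">> role <role-name>" (the concrete names depend on the loaded collection).
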
def notify_shutdown():
    '''Replaces previous Xmessage shutdown notifications'''
    warning_message = ("Battery power is critically low!\n"
                       "Shutting down system in 1 minute...")
    cmd = 'notify-send -u critical "%s"' % warning_message
    call(sh_split(cmd))

def notify_bat(bat_value):
    '''Replaces previous Xmessage battery notifications'''
    warning_message = "Warning: Battery is at %d%%!" % bat_value
    cmd = 'notify-send -u normal "%s"' % warning_message
    call(sh_split(cmd))

def send_xmessage_shutdown():
    cmd = ('xmessage -button ok -center "Shutting down in 1 minute...'
           ' (Click OK or close window to shutdown now)" -timeout 60')
    cmd = sh_split(cmd)
    call(cmd)

def test_ovx():
    net = None  # so the finally clause is safe if setup fails early
    try:
        ip = "127.0.0.1"
        port = 6633
        info("*** Add remote controller\n")
        c = RemoteController("c", ip=ip, port=port)
        net = Containernet(
            autoStaticArp=True, autoSetMacs=True, controller=None, link=TCLink
        )
        net.addController(c)

        info("*** Add switches, hosts and links \n")
        # Add core switches
        cores = {}
        for switch in CORES:
            cores[switch] = net.addSwitch(switch, dpid=(CORES[switch]["dpid"] % "0"))

        # Add hosts and connect them to their core switch
        for switch in CORES:
            for count in range(1, FANOUT + 1):
                # Add hosts (separate name so the controller ip above is
                # not shadowed)
                host = "h_%s_%s" % (switch, count)
                host_ip = "10.0.0.%s" % count
                mac = CORES[switch]["dpid"][4:] % count
                h = net.addDockerHost(host, dimage="dev_test", ip=host_ip, mac=mac)
                # Connect hosts to core switches
                net.addLink(cores[switch], h)

        # Connect core switches
        net.addLink(cores["SFO"], cores["SEA"])
        net.addLink(cores["SEA"], cores["SLC"])
        net.addLink(cores["SFO"], cores["LAX"])
        net.addLink(cores["LAX"], cores["SLC"])
        net.addLink(cores["LAX"], cores["IAH"])
        net.addLink(cores["SLC"], cores["MCI"])
        net.addLink(cores["MCI"], cores["IAH"])
        net.addLink(cores["MCI"], cores["ORD"])
        net.addLink(cores["IAH"], cores["ATL"])
        net.addLink(cores["ORD"], cores["ATL"])
        net.addLink(cores["ORD"], cores["CLE"])
        net.addLink(cores["ATL"], cores["IAD"])
        net.addLink(cores["CLE"], cores["IAD"])
        net.addLink(cores["CLE"], cores["EWR"])
        net.addLink(cores["EWR"], cores["IAD"])

        info("*** Start network... \n")
        net.start()
        print(
            "Hosts configured with IPs, switches pointing to OpenVirteX at %s:%s"
            % (ip, port)
        )

        info("[OVX] Create a virtual network between SEA and LAX\n")
        wd = os.getcwd()
        os.chdir(OVXCTL_DIR)
        commands = [
            # Create virtual networks
            "python2 ovxctl.py createNetwork tcp:{}:{} 10.0.0.0 16".format(
                SDN_CONTROLLER_IP, SDN_CONTROLLER_PORT
            ),
            # Create virtual switches
            "python2 ovxctl.py -n createSwitch 1 00:00:00:00:00:00:01:00",
            "python2 ovxctl.py -n createSwitch 1 00:00:00:00:00:00:02:00",
            "python2 ovxctl.py -n createSwitch 1 00:00:00:00:00:00:03:00",
            # Create virtual ports
            "python2 ovxctl.py -n createPort 1 00:00:00:00:00:00:01:00 1",
            "python2 ovxctl.py -n createPort 1 00:00:00:00:00:00:01:00 5",
            "python2 ovxctl.py -n createPort 1 00:00:00:00:00:00:02:00 5",
            "python2 ovxctl.py -n createPort 1 00:00:00:00:00:00:02:00 6",
            "python2 ovxctl.py -n createPort 1 00:00:00:00:00:00:03:00 5",
            "python2 ovxctl.py -n createPort 1 00:00:00:00:00:00:03:00 2",
            # Create virtual links
            "python2 ovxctl.py -n connectLink 1 00:a4:23:05:00:00:00:01 2 00:a4:23:05:00:00:00:02 1 spf 1",
            "python2 ovxctl.py -n connectLink 1 00:a4:23:05:00:00:00:02 2 00:a4:23:05:00:00:00:03 1 spf 1",
            # Connect hosts
            "python2 ovxctl.py -n connectHost 1 00:a4:23:05:00:00:00:01 1 00:00:00:00:01:01",
            "python2 ovxctl.py -n connectHost 1 00:a4:23:05:00:00:00:03 2 00:00:00:00:03:02",
            # Start virtual network
            "python2 ovxctl.py -n startNetwork 1",
        ]
        for cmd in commands:
            ret = check_output(sh_split(cmd), encoding="utf-8")
            print(ret)
        os.chdir(wd)

        CLI(net)
    except Exception as e:
        error(e)
    finally:
        if net is not None:
            net.stop()

core = "{}/core".format(application)
logs = "{}/logs".format(target)
panel = "tmux new-window -t {} -n seccion".format(proyecto)
plogs = "cd {}; tail -f error.log | grep -oP '(?<=AH01215:).*'".format(logs)
vim = "cd {}; vim ."
edit = "cd {}; vim -c"
interactive = "; bash -i"

panel_application = '{} "{}"'.format(panel, vim.format(application))
panel_modules = '{} "{}"'.format(panel, vim.format(modules))
panel_core = '{} "{}"'.format(panel, vim.format(core))
panel_static = '{} "{}"'.format(panel, vim.format(static))
panel_logs = '{} "{}"'.format(panel, plogs)

comandos = [
    "tmux new-session -A -d -s {} -c {} -n repo".format(proyecto, application),
    panel_logs.replace("seccion", "logs"),
    panel_application.replace("seccion", "application"),
    panel_modules.replace("seccion", "modules"),
    panel_core.replace("seccion", "core"),
    panel_static.replace("seccion", "static"),
    "tmux set-option -g pane-active-border-fg colour240",
    "tmux attach -t {}:0".format(proyecto),
]

for indice, comando in enumerate(comandos):
    comandos[indice] = sh_split(comando)

for comando in comandos:
    call(comando)

def FlagsForFile(path, **kwarg):
    flags = sh_split(check_output(["make", "print-cflags"]))
    return {'flags': flags, 'do_cache': True}

def FlagsForFile(path, **kwarg):
    flags = sh_split(check_output(["make", "print-flags"]))
    flags.extend(("-Qunused-arguments", "-DDWT_USE_POPOVER=TRUE",
                  "-DDWT_USE_OVERLAY=TRUE"))
    return {"flags": flags, "do_cache": True}

def FlagsForFile(path, **kwarg):
    flags = sh_split(check_output(["make", "print-flags"]))
    flags.append("-Qunused-arguments")
    return {'flags': flags, 'do_cache': True}

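# The FlagsForFile variants above assume the project Makefile exposes its
# compile flags through a phony target, along these (hypothetical) lines:
#   print-cflags:
#       @echo $(CFLAGS)
# Note that on Python 3 check_output() returns bytes, so its result may
# need decoding before being handed to sh_split().
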
def play_audio_clip():
    cmd = sh_split("aplay -q %s" % WAV_FILE)
    call(cmd)

def notify_bat(bat_value):
    '''Replaces previous Xmessage battery notifications'''
    warning_message = LOW_WARNING_MSG % bat_value
    cmd = 'notify-send -u normal "%s"' % warning_message
    call(sh_split(cmd))

def notify_shutdown():
    '''Replaces previous Xmessage shutdown notifications'''
    cmd = 'notify-send -u critical "%s"' % CRIT_BAT_MSG
    call(sh_split(cmd))

#!/usr/bin/python
# Recompile and reload Intel igb network drivers after kernel upgrade
# New kernel will only be loaded after reboot so have this run at startup
from os.path import isfile
import subprocess as sub
from shlex import split as sh_split
import os
import sys

cmd = sh_split("uname -a")
cmd2 = '''
rmmod -v igb;
rm -rfv igb-*/;
tar -xvzf igb*.tar.gz && cd igb-*/src/ && make install &&
modprobe -v igb && /etc/init.d/networking restart;
cd ../../;
rm -rfv igb-*/;
'''
kernel_version_file = "./kernel_version.txt"
cur_dir = os.path.dirname(os.path.abspath(__file__))


def recompile_drivers():
    ret_value = sub.call(cmd2, shell=True)


def update_kernel_version_file():
    p = sub.Popen(cmd, stdout=sub.PIPE)

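# The snippet above is truncated; a minimal sketch of how the remaining
# logic might look, assuming the stored version file is compared against
# `uname -a` output (names and flow are assumptions, not the original):
def _update_kernel_version_file_sketch():
    # record the currently running kernel
    p = sub.Popen(cmd, stdout=sub.PIPE)
    current = p.communicate()[0].decode()
    with open(kernel_version_file, "w") as f:
        f.write(current)


def _kernel_changed_sketch():
    # recompile only when the recorded kernel differs from the running one
    if not isfile(kernel_version_file):
        return True
    with open(kernel_version_file) as f:
        recorded = f.read()
    p = sub.Popen(cmd, stdout=sub.PIPE)
    return p.communicate()[0].decode() != recorded
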
def b3_bootstrap(bootstrap_action, s3bucket=None, s3prefix=None, session=None):
    """Converts to boto3 BootstrapAction format.

    Receives a bootstrap action as loaded from the configuration file and
    returns a structure fit to be passed to the boto3 BootstrapActions
    list. Files will be uploaded to S3 if needed.

    YAML OPTIONS:
    The action can be defined using one of "script", "dir", "s3" or
    "command". If an action has more than one defined, only the first one
    will matter.

    name: string
        Bootstrap action name. If not given, the file name will be used.
    script: string
        Path to a local file that will be uploaded to S3 and passed on to
        boto3. Arguments can be passed inline or through the args key.
        Inline arguments will be inserted before the 'args' ones.
    dir: string
        Path to a local directory that contains scripts to be executed as
        bootstrap actions. For each file the function will call itself
        with the 'dir' key replaced with a 'script' key. The files will be
        ordered alphabetically, case insensitive. Arguments will be passed
        to every script, if defined.
    s3: string
        Path to a script on S3. 's3://' will be added if it's not there.
    command: string
        Path to a file that already exists on the EMR host.

    Args:
        bootstrap_action (dict): as read from the config file
        s3bucket (string): if a script/dir was passed, it will be uploaded
            to this bucket, unless overwritten by an action option
        s3prefix (string): prefix to be used for the action name if it has
            to be uploaded to S3. Can be overwritten for each action
        session (boto3.session): session to be used for S3 uploads, where
            needed

    Returns:
        list element(s) to be added to the list of bootstrap actions
    """
    # 'actions' will be returned to the calling function
    actions = None
    if 's3bucket' in bootstrap_action:
        s3bucket = bootstrap_action['s3bucket']
    if 's3prefix' in bootstrap_action:
        s3prefix = bootstrap_action['s3prefix']

    # check that one and only one action is defined
    action_count = 0
    if 'script' in bootstrap_action:
        action_count = action_count + 1
    if 'dir' in bootstrap_action:
        action_count = action_count + 1
    if 's3' in bootstrap_action:
        action_count = action_count + 1
    if 'command' in bootstrap_action:
        action_count = action_count + 1
    if action_count != 1:
        # TODO: improve message to point out exactly which action is borked
        raise KeyError('One and only one bootstrap action must be defined')

    # parse the action depending on type
    if 'script' in bootstrap_action:
        # upload the script to S3 and return the path
        actions = []
        action_args = []
        script_line = sh_split(bootstrap_action['script'])
        if len(script_line) == 0:
            # no script defined, return an empty list
            warn('"script" type action has no script set. Ignoring it')
            return []
        else:
            script = script_line[0]
            action_args = script_line[1:] + action_args
        if not s3bucket:
            # script defined, but we don't know where to upload it
            raise KeyError('Bucket undefined for script ' + script)
        name_on_s3 = bootstrap_action.get('name_on_s3', '_filename_')
        if name_on_s3.lower() == '_random_':
            action_path = upload_to_s3_rand(session, script, s3bucket,
                                            s3prefix)
        elif name_on_s3.lower() in ['_script_', '_scriptname_',
                                    '_file_', '_filename_']:
            # TODO: this is a hack to avoid writing another upload function
            # or changing the current one. fix this hack
            s3prefix = s3prefix + path.basename(script)
            action_path = upload_to_s3_rand(session, script, s3bucket,
                                            s3prefix, rand_length=0)
        else:
            # TODO: this is a hack to avoid writing another upload function
            # or changing the current one. fix this hack
            s3prefix = s3prefix + name_on_s3
            action_path = upload_to_s3_rand(session, script, s3bucket,
                                            s3prefix, rand_length=0)
        if 's3://' not in action_path:
            action_path = '{}{}'.format('s3://', action_path)
        if 'args' in bootstrap_action:
            if isinstance(bootstrap_action['args'], str):
                conf_args = sh_split(bootstrap_action['args'])
            else:
                conf_args = bootstrap_action['args']
            action_args.extend(conf_args)
        if 'name' in bootstrap_action:
            action_name = bootstrap_action['name']
        else:
            action_name = bootstrap_action['script'].replace(' ', '_')
        actions = [{
            'Name': action_name,
            'ScriptBootstrapAction': {
                'Path': action_path,
                'Args': action_args
            }
        }]
        # TODO: this will remove the action before it was executed on all
        # nodes. to be fixed
        if bootstrap_action.get('cleanup'):
            actions.extend([{
                'Name': '{}-cleanup'.format(action_name),
                'ScriptBootstrapAction': {
                    'Path': 'file://aws',
                    'Args': ['s3', 'rm', action_path]
                }
            }])
    elif 'dir' in bootstrap_action:
        # Consider 'dir' as a shortcut for multiple 'script' lines. The
        # 'dir' key will be replaced with a 'script' key having the file
        # path as its value, then the function will call itself passing
        # the resulting 'script' action type as parameter. The return will
        # be appended to the existing actions list, which will then be
        # returned.
        actions = []
        if not bootstrap_action['dir']:
            # directory not defined, ignore this action
            warn('"dir" type action has no directory set. Ignoring it')
            return []
        for entry in sorted(listdir(bootstrap_action['dir']),
                            key=lambda s: s.lower()):
            if not path.isfile(path.join(bootstrap_action['dir'], entry)) \
                    or entry[0] == '.':
                continue
            script_action = dict(bootstrap_action)
            del script_action['dir']
            script_action['script'] = path.join(bootstrap_action['dir'],
                                                entry)
            action = b3_bootstrap(script_action, s3bucket, s3prefix,
                                  session=session)
            actions = actions + action
    elif 's3' in bootstrap_action:
        actions = []
        s3_line = sh_split(bootstrap_action['s3'])
        if len(s3_line) == 0:
            warn('"s3" action has no s3 path set, ignoring it')
            return []
        else:
            action_path = s3_line[0]
            action_args = s3_line[1:] + bootstrap_action.get('args', [])
        if 's3://' not in action_path:
            action_path = '{}{}'.format('s3://', action_path)
        if 'name' in bootstrap_action:
            action_name = bootstrap_action['name']
        else:
            action_name = path.basename(action_path).replace(' ', '_')
        actions = [{
            'Name': action_name,
            'ScriptBootstrapAction': {
                'Path': action_path,
                'Args': action_args
            }
        }]
    elif 'command' in bootstrap_action:
        actions = []
        cmd_line = sh_split(bootstrap_action['command'])
        if len(cmd_line) == 0:
            warn('"command" action has no command set, ignoring it')
            return []
        else:
            action_path = cmd_line[0]
            action_args = cmd_line[1:] + bootstrap_action.get('args', [])
        if 'file://' not in action_path:
            action_path = '{}{}'.format('file://', action_path)
        if 'name' in bootstrap_action:
            action_name = bootstrap_action['name']
        else:
            action_name = path.basename(action_path).replace(' ', '_')
        actions = [{
            'Name': action_name,
            'ScriptBootstrapAction': {
                'Path': action_path,
                'Args': action_args
            }
        }]
    return actions

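# A minimal usage sketch for b3_bootstrap, assuming a hypothetical script
# already sitting on S3 (the 's3' action type needs no upload, so no
# bucket or session is required):
# action = {'name': 'install deps',
#           's3': 'my-bucket/bootstrap/install-deps.sh --verbose'}
# b3_bootstrap(action)
# -> [{'Name': 'install deps',
#      'ScriptBootstrapAction': {
#          'Path': 's3://my-bucket/bootstrap/install-deps.sh',
#          'Args': ['--verbose']}}]
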
def __run(self, test, compileflags, out):
    """
    Run the examples in `test`. Write the outcome of each example with one
    of the `DocTestRunner.report_*` methods, using the writer function
    `out`. `compileflags` is the set of compiler flags that should be used
    to execute examples. Return a tuple `(f, t)`, where `t` is the number
    of examples tried, and `f` is the number of examples that failed. The
    examples are run in the namespace `test.globs`.
    """
    # Keep track of the number of failures and tries.
    failures = tries = 0

    # Save the option flags (since option directives can be used
    # to modify them).
    original_optionflags = self.optionflags

    SUCCESS, FAILURE, BOOM = range(3)  # `outcome` state

    check = self._checker.check_output

    testenvironment = scripttest.TestFileEnvironment(
        base_path=self.directory)

    # Process each example.
    for examplenum, example in enumerate(test.examples):
        # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
        # reporting after the first failure.
        quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE
                 and failures > 0)

        # Merge in the example's options.
        self.optionflags = original_optionflags
        if example.options:
            for (optionflag, val) in example.options.items():
                if val:
                    self.optionflags |= optionflag
                else:
                    self.optionflags &= ~optionflag

        # If 'SKIP' is set, then skip this example.
        if self.optionflags & SKIP:
            continue

        # Record that we started this example.
        tries += 1
        if not quiet:
            self.report_start(out, test, example)

        # Run the example in the given context (globs), and record
        # any exception that gets raised. (But don't intercept
        # keyboard interrupts.)
        if self.optionflags & CREATE_FILE_BEFORE_TEST:
            split = sh_split(example.source)
            if split[0] == "cat" and (len(split) == 2 or
                                      len(split) >= 3 and
                                      split[2].startswith("#")):
                filename = split[1]
                with open(os.path.join(testenvironment.cwd, filename),
                          "w") as file_to_write:
                    file_to_write.write(example.want)
            else:
                raise ValueError(
                    "Example requested file creation, "
                    "which works only if the command is of the form "
                    "`$ cat 'literal_filename'`",
                    example.source,
                )

        by_python_pseudoshell = False
        if self.optionflags & PSEUDOSHELL:
            split = sh_split(example.source)
            if split[0] == "cd" and (len(split) == 2 or
                                     len(split) > 2 and
                                     split[2].startswith("#")):
                dirname = os.path.join(testenvironment.cwd, split[1])
                if os.path.exists(dirname) and os.path.isdir(dirname):
                    testenvironment.cwd = dirname
                got = ""
                by_python_pseudoshell = True
                exception = 0
            elif split[0] == "export" and (len(split) == 2 or
                                           len(split) > 2 and
                                           split[2].startswith("#")):
                variable, value = split[1].split("=")
                testenvironment.environ[variable] = value
                by_python_pseudoshell = True
                got = ""
                exception = 0

        if example.source.startswith("python -m") and \
                (self.optionflags & COVERAGE):
            data_file = os.path.abspath("./.coverage")
            coverage_file = os.path.abspath("./.coveragerc")
            with open(coverage_file, "w") as coveragerc:
                coveragerc.write(f"""[run]
branch=True
data_file={data_file:}""")
            example.source = example.source.replace(
                "python -m",
                f"coverage run -a --source lexedata --rcfile={coverage_file} -m",
            )

        if not by_python_pseudoshell:
            # Don't blink!  This is where the user's code gets run.
            try:
                # testenvironment does not run in shell mode. It's
                # better explicit than implicit anyway.
                output = testenvironment.run(
                    "/bin/sh",
                    "-c",
                    example.source,
                    expect_error=True,
                    err_to_out=True,
                )
                self.debugger.set_continue()  # ==== Example Finished ====
                exception = output.returncode
            except KeyboardInterrupt:
                raise
            got = output.stdout  # the actual output
        self._fakeout.truncate(0)

        outcome = FAILURE  # guilty until proven innocent or insane

        # If the example executed without raising any exceptions,
        # verify its output.
        if exception == 0:
            if check(example.want, got, self.optionflags):
                outcome = SUCCESS
        # The example raised an exception: check if it was expected.
        else:
            if check(example.want, got, self.optionflags):
                outcome = SUCCESS

        # Report the outcome.
        if outcome is SUCCESS:
            if not quiet:
                self.report_success(out, test, example, got)
        elif outcome is FAILURE:
            if not quiet:
                self.report_failure(out, test, example, got)
            failures += 1
        elif outcome is BOOM:
            if not quiet:
                self.report_unexpected_exception(out, test, example,
                                                 exception)
            failures += 1
        else:
            assert False, ("unknown outcome", outcome)

        if failures and self.optionflags & FAIL_FAST:
            break

    # Restore the option flags (in case they were modified)
    self.optionflags = original_optionflags

    # Record and return the number of failures and tries.
    self.__record_outcome(test, failures, tries)
    return TestResults(failures, tries)

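# For context, a hedged illustration of the file-creation convention the
# CREATE_FILE_BEFORE_TEST branch above checks for: an example of the form
#   $ cat 'setup.csv'
#   ID,Name
#   1,foo
# writes the expected-output block (example.want) to setup.csv inside the
# test environment before the command is executed.
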
def suspend_system():
    cmd = sh_split("systemctl suspend")
    call(cmd)

def send_xmessage_bat(bat_value):
    warning_message = "Warning: Battery is at %d%%" % bat_value
    cmd = 'xmessage -button ok -center "%s" -timeout 10' % warning_message
    cmd = sh_split(cmd)
    call(cmd)

def shutdown_system():
    cmd = sh_split("systemctl poweroff")
    call(cmd)

def b3_step(yaml_step, s3bucket=None, s3prefix=None, session=None):
    """Converts step to boto3 structure.

    Converts an EMR 'step' from the simplified yaml format to the syntax
    needed to pass a step to boto3 EMR client methods like run_job_flow()
    or add_job_flow_steps(). EMR 4.x only.

    YAML structure:
    The step to be executed will be defined using one of 'exec', 'script',
    'dir', 's3' or 'command'. For each step, only one of these should be
    defined. The first one encountered will determine the actions taken,
    the rest will be ignored.

    name: string, required
        name of the step.
    on_failure: string, case insensitive, default is "TERMINATE_CLUSTER"
        what to do if the step fails. valid values are (case insensitive):
        - terminate | terminate_cluster | terminate_job_flow
        - cancel | wait | cancel_and_wait
        - continue
    type: string, case insensitive, default is "CUSTOM_JAR"
        what kind of step this is. it can be a custom jar to be executed
        as-is, or it can be a script that will be passed to another
        application by "command-runner.jar". valid values (at the end of
        2015) are:
        - custom_jar | custom | jar
        - streaming | hadoop-streaming | hadoop_streaming
        - hive | hive-script | hive_script
        - pig
        - impala
        - spark
        - shell (shell scripts are run using script-runner.jar)
        NOT ALL OF THEM ARE IMPLEMENTED
    exec: string
        URI of the script/jar to be executed. Will be passed to boto3
        as-is.
    script: string
        Path to a local file that will be uploaded to S3 and passed on to
        boto3. Arguments can be passed inline or through the args key.
        Inline arguments will be inserted before the 'args' ones.
    dir: string
        Path to a local directory that contains scripts to be executed as
        steps. For each file the function will call itself with 'dir'
        replaced with 'script'. The files will be ordered alphabetically,
        case insensitive. Arguments will be passed to every script, if
        defined.
    s3: string
        Path to a script on S3. 's3://' will be added if it's not there.
    command: string
        Path to a file that already exists on the EMR host. Arguments
        inline or as 'args'.
    args: list
        arguments to be passed to the step. depending on the step `type`
        the list will be interpreted in different ways (or not)
    main_class: string
        the name of the main class in the specified Java file. if not
        specified, the JAR file should specify a Main-Class in its
        manifest file. will be passed on as-is
    properties: list
        a list of Java properties that are set when the step runs. you can
        use these properties to pass key value pairs to your main
        function. will be passed on as-is

    Args:
        yaml_step (dict): the step as read from the yaml file.
        s3bucket (string): name of the bucket that steps will be uploaded
            to if they are local scripts
        s3prefix (string): 'directory' in the bucket
        session (session): an already defined session object for uploading

    Returns:
        a list element that can be added to the list of steps the cluster
        should execute
    """
    # check that we received a dictionary, otherwise raise exception
    if not isinstance(yaml_step, dict):
        raise TypeError('Parameter should be a dict, but we received ',
                        type(yaml_step))

    # check for keys that we require, raise exception if not provided
    required_keys = ['name', 'type']  # name could be 'auto-completed' later
    missing_keys = []
    for key in required_keys:
        if key not in yaml_step:
            missing_keys.append(key)
    require_one_of = ['exec', 'script', 'dir', 's3', 'command']
    require_one_found = False
    for key in require_one_of:
        if key in yaml_step:
            require_one_found = True
            break
    if not require_one_found:
        missing_keys.append('|'.join(require_one_of))
    if len(missing_keys) != 0:
        raise KeyError('Required data missing for this step: ',
                       missing_keys)

    # if we got passed a local script or directory, upload it to s3
    if 'exec' in yaml_step:
        # leave everything as it is, allow the user to specify whatever
        # they want
        pass
    elif 'script' in yaml_step:
        # upload file to S3, create 'exec' key as s3:// path
        # first, check that we received an S3 bucket to upload to
        if 's3bucket' in yaml_step:
            s3bucket = yaml_step['s3bucket']
        if 's3prefix' in yaml_step:
            s3prefix = yaml_step['s3prefix']
        if not s3bucket:
            # script defined, but we don't know where to upload it
            raise KeyError('Bucket undefined for step script '
                           + yaml_step['script'])
        # split 'script' in file and arguments, in case there are any
        script_line = sh_split(yaml_step['script'])
        if len(script_line) == 0:
            # no script defined, return an empty list
            warn('"script" type step has no script set. Ignoring it')
            return []
        else:
            script = script_line[0]
            yaml_step['args'] = script_line[1:] + yaml_step.get('args', [])
        # upload to s3
        name_on_s3 = yaml_step.get('name_on_s3', '_random_')
        if name_on_s3.lower() == '_random_':
            step_path = upload_to_s3_rand(session, script, s3bucket,
                                          s3prefix)
        elif name_on_s3.lower() in ['_script_', '_scriptname_',
                                    '_file_', '_filename_']:
            # TODO: this is a hack to avoid writing another upload
            # function or changing the current one. fix this hack
            s3prefix = s3prefix + path.basename(script)
            step_path = upload_to_s3_rand(session, script, s3bucket,
                                          s3prefix, rand_length=0)
        else:
            # TODO: this is a hack to avoid writing another upload
            # function or changing the current one. fix this hack
            s3prefix = s3prefix + name_on_s3
            step_path = upload_to_s3_rand(session, script, s3bucket,
                                          s3prefix, rand_length=0)
        if 's3://' not in step_path:
            step_path = '{}{}'.format('s3://', step_path)
        # set 'exec' to the new path
        yaml_step['exec'] = step_path
    elif 'dir' in yaml_step:
        # call ourselves for each file in dir, replacing 'dir' with
        # 'script'; add the converted step for each file to a list that
        # will be returned
        if not yaml_step['dir']:
            # directory not defined, ignore this action
            warn('"dir" type step has no directory set. Ignoring it')
            return []
        boto_steps = []
        for entry in sorted(listdir(yaml_step['dir']),
                            key=lambda s: s.lower()):
            if not path.isfile(path.join(yaml_step['dir'], entry)) \
                    or entry[0] == '.':
                continue
            script_step = dict(yaml_step)
            del script_step['dir']
            script_step['script'] = path.join(yaml_step['dir'], entry)
            boto_step = b3_step(script_step, s3bucket, s3prefix,
                                session=session)
            boto_steps.extend(boto_step)
        return boto_steps
    elif 's3' in yaml_step:
        # make sure the path starts with 's3://' and set 'exec' key
        script_line = sh_split(yaml_step['s3'])
        if len(script_line) == 0:
            warn('"s3" type step has no script set. Ignoring it')
            return []
        else:
            script = script_line[0]
            yaml_step['args'] = script_line[1:] + yaml_step.get('args', [])
        if 's3://' not in script:
            script = 's3://' + script
        del yaml_step['s3']
        yaml_step['exec'] = script
    elif 'command' in yaml_step:
        # start path with 'file://' and set 'exec'
        script_line = sh_split(yaml_step['command'])
        if len(script_line) == 0:
            warn('"command" type step has no command set. Ignoring it')
            return []
        else:
            script = script_line[0]
            yaml_step['args'] = script_line[1:] + yaml_step.get('args', [])
        if 'file://' not in script:
            script = 'file://' + script
        del yaml_step['command']
        yaml_step['exec'] = script

    # we have what we need, initialize the dictionary that will be returned
    boto_step = {'Name': yaml_step['name']}

    # by default, terminate cluster on step failure
    if 'on_failure' in yaml_step:
        on_failure = yaml_step['on_failure'].lower()
    else:
        on_failure = 'terminate_cluster'
    if on_failure in ['terminate', 'terminate_cluster',
                      'terminate_job_flow']:
        action_on_failure = 'TERMINATE_CLUSTER'
    elif on_failure in ['cancel', 'wait', 'cancel_and_wait']:
        action_on_failure = 'CANCEL_AND_WAIT'
    elif on_failure in ['continue']:
        action_on_failure = 'CONTINUE'
    else:
        # this step exists only for the warning
        warn('The value "{0}" for on_failure in step "{1}" is not valid. '
             'It will be set to "TERMINATE_CLUSTER".'
             ''.format(on_failure, yaml_step['name']))
        action_on_failure = 'TERMINATE_CLUSTER'
    boto_step['ActionOnFailure'] = action_on_failure

    if 'type' in yaml_step:
        step_type = yaml_step['type'].lower()
    else:
        # type is required above, but might be optional in the future
        step_type = 'custom'

    # set jar and arguments according to 'type'
    # TODO: mostly incomplete, check
    # https://github.com/aws/aws-cli/blob/develop/awscli/customizations/emr/steputils.py
    # for more info on how to deal with the different types.
    # Or, go to console, create cluster, advanced options, add a step,
    # configure it, then look into the 'arguments' column for hints
    if step_type in ['custom', 'custom_jar', 'custom-jar', 'jar']:
        jar = yaml_step['exec']
        # 'args' may be absent for 'exec' type steps
        args = yaml_step.get('args', [])
    elif step_type in ['streaming', 'hadoop-streaming', 'hadoop_streaming']:
        # TODO: incomplete
        jar = 'command-runner.jar'
        args = ['hadoop-streaming']
        raise NotImplementedError(step_type)
    elif step_type in ['hive', 'hive-script', 'hive_script']:
        jar = 'command-runner.jar'
        print(yaml_step['exec'])
        args = ['hive-script', '--run-hive-script', '--args',
                '-f', yaml_step['exec']]
        for arg in yaml_step.get('args', []):
            if not isinstance(arg, dict) or len(arg) != 1:
                warn('Expected a single key:value pair as argument in '
                     'step {0}. Received {1}, {2}. Ignoring it.'.format(
                         yaml_step['name'], arg, type(arg)))
                continue
            if 'input' in arg:
                args.extend(['-d', 'INPUT=' + arg['input']])
            elif 'output' in arg:
                args.extend(['-d', 'OUTPUT=' + arg['output']])
            elif 'other' in arg:
                args.append(arg['other'])
            else:
                warn("Received argument {0} in step {1}. Don't know what "
                     'to do with it, ignoring.'.format(arg,
                                                       yaml_step['name']))
    elif step_type in ['shell', 'shellscript', 'sh']:
        jar = 's3://elasticmapreduce/libs/script-runner/script-runner.jar'
        # 'exec' is a string, make it a list
        args = [yaml_step['exec']] + yaml_step.get('args', [])
    else:
        raise NotImplementedError(
            'Received type {0} in step {1}. This type is either invalid, '
            'or not yet implemented. Use "CUSTOM_JAR" or contact the '
            'programmers.'.format(step_type, yaml_step['name']))

    hadoop_jar_step = {'Jar': jar, 'Args': args}
    if 'main_class' in yaml_step:
        hadoop_jar_step['MainClass'] = yaml_step['main_class']
    if 'properties' in yaml_step:
        hadoop_jar_step['Properties'] = yaml_step['properties']
    boto_step['HadoopJarStep'] = hadoop_jar_step

    return [boto_step]  # list

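# A minimal usage sketch for b3_step, assuming a hypothetical jar already
# on S3 ('exec' steps are passed through without any upload):
# step = {'name': 'wordcount', 'type': 'jar',
#         'exec': 's3://my-bucket/jars/wordcount.jar',
#         'args': ['s3://in/', 's3://out/'], 'on_failure': 'continue'}
# b3_step(step)
# -> [{'Name': 'wordcount', 'ActionOnFailure': 'CONTINUE',
#      'HadoopJarStep': {'Jar': 's3://my-bucket/jars/wordcount.jar',
#                        'Args': ['s3://in/', 's3://out/']}}]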